You are viewing a plain text version of this content. The canonical link for it was provided in the original HTML version of this page.
Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2017/07/25 18:02:43 UTC

[01/50] [abbrv] hadoop git commit: YARN-6205. Default lifetime for native services app is invalid. Contributed by Billie Rinaldi [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services e79f4f5a9 -> cc1dd1e5f (forced update)


YARN-6205. Default lifetime for native services app is invalid. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6142f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6142f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6142f2a

Branch: refs/heads/yarn-native-services
Commit: c6142f2aae8e42abd8272d2c0d4c9aecac0189dc
Parents: 0f6652d
Author: Gour Saha <go...@apache.org>
Authored: Tue Feb 21 09:04:04 2017 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:24 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/slider/client/SliderClient.java       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6142f2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index e84809a..00e2b62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -2070,7 +2070,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     amLauncher.setKeepContainersOverRestarts(true);
     // set lifetime in submission context;
     Map<ApplicationTimeoutType, Long> appTimeout = new HashMap<>();
-    if (lifetime >= 0) {
+    if (lifetime > 0) {
       appTimeout.put(ApplicationTimeoutType.LIFETIME, lifetime);
     }
     amLauncher.submissionContext.setApplicationTimeouts(appTimeout);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[43/50] [abbrv] hadoop git commit: YARN-6655. Validate yarn native services application submission side to ensure that the hostname is less than 63 characters. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6655. Validate yarn native services application submission side to ensure that the hostname is less than 63 characters. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40545156
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40545156
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40545156

Branch: refs/heads/yarn-native-services
Commit: 405451567d0f76d9d5b0b200dfe80a1fb4379f9d
Parents: 14b8371
Author: Jian He <ji...@apache.org>
Authored: Thu Jun 29 13:11:06 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   4 +-
 .../org/apache/slider/client/SliderClient.java  |   9 +-
 .../apache/slider/common/tools/SliderUtils.java |   1 +
 .../slider/util/RestApiErrorMessages.java       |   9 +-
 .../org/apache/slider/util/ServiceApiUtil.java  |  24 +++-
 .../core/conf/TestConfigurationResolve.java     |  10 +-
 .../slider/core/conf/TestExampleAppJson.java    |   4 +-
 .../model/mock/BaseMockAppStateTest.java        |   2 +-
 .../apache/slider/utils/TestServiceApiUtil.java | 124 +++++++++++++++----
 .../registry/client/api/RegistryConstants.java  |   5 +
 10 files changed, 155 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 05aad32..2f090a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -153,7 +153,7 @@ definitions:
     properties:
       name:
         type: string
-        description: A unique application name.
+        description: A unique application name. If Registry DNS is enabled, the max length is 63 characters.
       id:
         type: string
         description: A unique application id.
@@ -255,7 +255,7 @@ definitions:
     properties:
       name:
         type: string
-        description: Name of the application component (mandatory).
+        description: Name of the application component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.
       dependencies:
         type: array
         items:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index a3ba8c0..e261a8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -637,7 +637,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   public int actionBuild(Application application) throws YarnException,
       IOException {
     Path appDir = checkAppNotExistOnHdfs(application);
-    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
+    ServiceApiUtil.validateAndResolveApplication(application,
+        sliderFileSystem, getConfig());
     persistApp(appDir, application);
     deployedClusterName = application.getName();
     return EXIT_SUCCESS;
@@ -647,7 +648,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       throws IOException, YarnException {
     String appName = application.getName();
     validateClusterName(appName);
-    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
+    ServiceApiUtil.validateAndResolveApplication(application,
+        sliderFileSystem, getConfig());
     verifyNoLiveApp(appName, "Create");
     Path appDir = checkAppNotExistOnHdfs(application);
 
@@ -1778,7 +1780,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     Path appDir = checkAppExistOnHdfs(appName);
     Application application = ServiceApiUtil.loadApplication(sliderFileSystem,
         appName);
-    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
+    ServiceApiUtil.validateAndResolveApplication(application,
+        sliderFileSystem, getConfig());
     // see if it is actually running and bail out;
     verifyNoLiveApp(appName, "Thaw");
     ApplicationId appId = submitApp(application);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 6dc51ec..2e1236d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ExitUtil;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
index 3033537..74f7e06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
@@ -21,8 +21,13 @@ public interface RestApiErrorMessages {
   String ERROR_APPLICATION_NAME_INVALID =
       "Application name is either empty or not provided";
   String ERROR_APPLICATION_NAME_INVALID_FORMAT =
-      "Application name %s is not valid - only lower case letters, digits,"
-          + " underscore and hyphen are allowed";
+      "Application name %s is not valid - only lower case letters, digits, " +
+          "underscore and hyphen are allowed, and the name must be no more " +
+          "than 63 characters";
+  String ERROR_COMPONENT_NAME_INVALID =
+      "Component name must be no more than %s characters: %s";
+  String ERROR_USER_NAME_INVALID =
+      "User name must be no more than 63 characters";
 
   String ERROR_APPLICATION_NOT_RUNNING = "Application not running";
   String ERROR_APPLICATION_DOES_NOT_EXIST = "Application not found";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
index e977727..3da6e15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
@@ -22,6 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Component;
@@ -60,12 +62,22 @@ public class ServiceApiUtil {
 
   @VisibleForTesting
   public static void validateAndResolveApplication(Application application,
-      SliderFileSystem fs) throws IOException {
+      SliderFileSystem fs, org.apache.hadoop.conf.Configuration conf) throws
+      IOException {
+    boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
+        RegistryConstants.DEFAULT_DNS_ENABLED);
+    if (dnsEnabled && RegistryUtils.currentUser().length() > RegistryConstants
+        .MAX_FQDN_LABEL_LENGTH) {
+      throw new IllegalArgumentException(RestApiErrorMessages
+          .ERROR_USER_NAME_INVALID);
+    }
     if (StringUtils.isEmpty(application.getName())) {
       throw new IllegalArgumentException(
           RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
     }
-    if (!SliderUtils.isClusternameValid(application.getName())) {
+    if (!SliderUtils.isClusternameValid(application.getName()) || (dnsEnabled
+        && application.getName().length() > RegistryConstants
+        .MAX_FQDN_LABEL_LENGTH)) {
       throw new IllegalArgumentException(String.format(
           RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT,
           application.getName()));
@@ -108,6 +120,14 @@ public class ServiceApiUtil {
     List<Component> componentsToRemove = new ArrayList<>();
     List<Component> componentsToAdd = new ArrayList<>();
     for (Component comp : application.getComponents()) {
+      int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH;
+      if (comp.getUniqueComponentSupport()) {
+        maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length();
+      }
+      if (dnsEnabled && comp.getName().length() > maxCompLength) {
+        throw new IllegalArgumentException(String.format(RestApiErrorMessages
+            .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName()));
+      }
       if (componentNames.contains(comp.getName())) {
         throw new IllegalArgumentException("Component name collision: " +
             comp.getName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
index 5f5df70..78dd669 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
@@ -20,6 +20,7 @@ package org.apache.slider.core.conf;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.resource.ConfigFile.TypeEnum;
@@ -95,7 +96,8 @@ public class TestConfigurationResolve extends Assert {
     expect(sfs.buildClusterDirPath(anyObject())).andReturn(
         new Path("cluster_dir_path")).anyTimes();
     replay(sfs, mockFs);
-    ServiceApiUtil.validateAndResolveApplication(orig, sfs);
+    ServiceApiUtil.validateAndResolveApplication(orig, sfs, new
+        YarnConfiguration());
 
     global = orig.getConfiguration();
     LOG.info("global = {}", global);
@@ -179,7 +181,8 @@ public class TestConfigurationResolve extends Assert {
         new Path("cluster_dir_path")).anyTimes();
     replay(sfs, mockFs);
     Application ext = ExampleAppJson.loadResource(APP_JSON);
-    ServiceApiUtil.validateAndResolveApplication(ext, sfs);
+    ServiceApiUtil.validateAndResolveApplication(ext, sfs, new
+        YarnConfiguration());
     reset(sfs, mockFs);
 
     // perform the resolution on original application
@@ -192,7 +195,8 @@ public class TestConfigurationResolve extends Assert {
         .anyTimes();
     replay(sfs, mockFs, jsonSerDeser);
     ServiceApiUtil.setJsonSerDeser(jsonSerDeser);
-    ServiceApiUtil.validateAndResolveApplication(orig, sfs);
+    ServiceApiUtil.validateAndResolveApplication(orig, sfs, new
+        YarnConfiguration());
 
     global = orig.getConfiguration();
     assertEquals(0, global.getProperties().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
index 09096d0..9aeefee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
@@ -20,6 +20,7 @@ package org.apache.slider.core.conf;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.util.ServiceApiUtil;
@@ -71,7 +72,8 @@ public class TestExampleAppJson extends Assert {
           new Path("cluster_dir_path")).anyTimes();
       replay(sfs, mockFs);
 
-      ServiceApiUtil.validateAndResolveApplication(application, sfs);
+      ServiceApiUtil.validateAndResolveApplication(application, sfs,
+          new YarnConfiguration());
     } catch (Exception e) {
       throw new Exception("exception loading " + resource + ":" + e.toString());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
index 5af87f9..db32c79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
@@ -133,7 +133,7 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
     AppStateBindingInfo binding = new AppStateBindingInfo();
     binding.application = buildApplication();
     ServiceApiUtil.validateAndResolveApplication(binding.application,
-        sliderFileSystem);
+        sliderFileSystem, SliderUtils.createConfiguration());
     //binding.roles = new ArrayList<>(factory.ROLES);
     binding.fs = fs;
     binding.historyPath = historyPath;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
index d7a9cfd..889cc04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
@@ -19,6 +19,8 @@ package org.apache.slider.utils;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Component;
@@ -29,6 +31,7 @@ import org.apache.slider.util.RestApiConstants;
 import org.apache.slider.util.RestApiErrorMessages;
 import org.apache.slider.util.ServiceApiUtil;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -61,26 +64,42 @@ public class TestServiceApiUtil {
   private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " +
       "exception: ";
 
+  private static final String LEN_64_STR =
+      "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01";
+
+  private static final YarnConfiguration CONF_DEFAULT_DNS = new
+      YarnConfiguration();
+  private static final YarnConfiguration CONF_DNS_ENABLED = new
+      YarnConfiguration();
+
+  @BeforeClass
+  public static void init() {
+    CONF_DNS_ENABLED.setBoolean(RegistryConstants.KEY_DNS_ENABLED, true);
+  }
+
   @Test(timeout = 90000)
   public void testResourceValidation() throws Exception {
+    assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR
+        .length());
+
     SliderFileSystem sfs = initMock(null);
 
     Application app = new Application();
 
     // no name
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no name");
     } catch (IllegalArgumentException e) {
       assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
     }
 
     // bad format name
-    String[] badNames = {"4finance", "Finance", "finance@home"};
+    String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR};
     for (String badName : badNames) {
       app.setName(badName);
       try {
-        ServiceApiUtil.validateAndResolveApplication(app, sfs);
+        ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
         Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
       } catch (IllegalArgumentException e) {
         assertEquals(String.format(
@@ -89,9 +108,20 @@ public class TestServiceApiUtil {
     }
 
     // launch command not specified
-    app.setName("finance_home");
+    app.setName(LEN_64_STR);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DEFAULT_DNS);
+      Assert.fail(EXCEPTION_PREFIX + "application with no launch command");
+    } catch (IllegalArgumentException e) {
+      assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND,
+          e.getMessage());
+    }
+
+    // launch command not specified
+    app.setName(LEN_64_STR.substring(0, RegistryConstants
+        .MAX_FQDN_LABEL_LENGTH));
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no launch command");
     } catch (IllegalArgumentException e) {
       assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND,
@@ -101,7 +131,7 @@ public class TestServiceApiUtil {
     // resource not specified
     app.setLaunchCommand("sleep 3600");
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no resource");
     } catch (IllegalArgumentException e) {
       assertEquals(String.format(
@@ -113,7 +143,7 @@ public class TestServiceApiUtil {
     Resource res = new Resource();
     app.setResource(res);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no memory");
     } catch (IllegalArgumentException e) {
       assertEquals(String.format(
@@ -125,7 +155,7 @@ public class TestServiceApiUtil {
     res.setMemory("100mb");
     res.setCpus(-2);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(
           EXCEPTION_PREFIX + "application with invalid no of cpus");
     } catch (IllegalArgumentException e) {
@@ -137,7 +167,7 @@ public class TestServiceApiUtil {
     // number of containers not specified
     res.setCpus(2);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no container count");
     } catch (IllegalArgumentException e) {
       Assert.assertTrue(e.getMessage()
@@ -147,7 +177,7 @@ public class TestServiceApiUtil {
     // specifying profile along with cpus/memory raises exception
     res.setProfile("hbase_finance_large");
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX
           + "application with resource profile along with cpus/memory");
     } catch (IllegalArgumentException e) {
@@ -162,7 +192,7 @@ public class TestServiceApiUtil {
     res.setCpus(null);
     res.setMemory(null);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with resource profile only");
     } catch (IllegalArgumentException e) {
       assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET,
@@ -176,7 +206,7 @@ public class TestServiceApiUtil {
 
     // null number of containers
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "null number of containers");
     } catch (IllegalArgumentException e) {
       Assert.assertTrue(e.getMessage()
@@ -186,7 +216,7 @@ public class TestServiceApiUtil {
     // negative number of containers
     app.setNumberOfContainers(-1L);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "negative number of containers");
     } catch (IllegalArgumentException e) {
       Assert.assertTrue(e.getMessage()
@@ -196,7 +226,7 @@ public class TestServiceApiUtil {
     // everything valid here
     app.setNumberOfContainers(5L);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       LOG.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
@@ -218,7 +248,7 @@ public class TestServiceApiUtil {
     Artifact artifact = new Artifact();
     app.setArtifact(artifact);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
     } catch (IllegalArgumentException e) {
       assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
@@ -227,7 +257,7 @@ public class TestServiceApiUtil {
     // no artifact id fails with APPLICATION type
     artifact.setType(Artifact.TypeEnum.APPLICATION);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
     } catch (IllegalArgumentException e) {
       assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
@@ -236,7 +266,7 @@ public class TestServiceApiUtil {
     // no artifact id fails with TARBALL type
     artifact.setType(Artifact.TypeEnum.TARBALL);
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
     } catch (IllegalArgumentException e) {
       assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
@@ -246,7 +276,7 @@ public class TestServiceApiUtil {
     artifact.setType(Artifact.TypeEnum.DOCKER);
     artifact.setId("docker.io/centos:centos7");
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       LOG.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
@@ -314,7 +344,7 @@ public class TestServiceApiUtil {
     app.setArtifact(artifact);
 
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
     }
@@ -333,7 +363,7 @@ public class TestServiceApiUtil {
 
     // duplicate component name fails
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
       Assert.fail(EXCEPTION_PREFIX + "application with component collision");
     } catch (IllegalArgumentException e) {
       assertEquals("Component name collision: " + compName, e.getMessage());
@@ -353,7 +383,7 @@ public class TestServiceApiUtil {
 
     // duplicate component name okay in the case of APPLICATION component
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
     }
@@ -371,7 +401,7 @@ public class TestServiceApiUtil {
     app.setArtifact(artifact);
 
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
     }
@@ -384,7 +414,7 @@ public class TestServiceApiUtil {
     app.getComponent("comp2").setArtifact(artifact);
 
     try {
-      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
     } catch (IllegalArgumentException e) {
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
     }
@@ -441,4 +471,52 @@ public class TestServiceApiUtil {
           .getMessage());
     }
   }
+
+  @Test
+  public void testInvalidComponent() throws IOException {
+    SliderFileSystem sfs = initMock(null);
+    testComponent(sfs, false);
+    testComponent(sfs, true);
+  }
+
+  private static void testComponent(SliderFileSystem sfs, boolean unique)
+      throws IOException {
+    int maxLen = RegistryConstants.MAX_FQDN_LABEL_LENGTH;
+    if (unique) {
+      assertEquals(19, Long.toString(Long.MAX_VALUE).length());
+      maxLen = maxLen - Long.toString(Long.MAX_VALUE).length();
+    }
+    String compName = LEN_64_STR.substring(0, maxLen + 1);
+    Application app = createValidApplication(null);
+    app.addComponent(createValidComponent(compName).uniqueComponentSupport(
+        unique));
+
+    // invalid component name fails if dns is enabled
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
+      Assert.fail(EXCEPTION_PREFIX + "application with invalid component name");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(RestApiErrorMessages
+          .ERROR_COMPONENT_NAME_INVALID, maxLen, compName), e.getMessage());
+    }
+
+    // does not fail if dns is disabled
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DEFAULT_DNS);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    compName = LEN_64_STR.substring(0, maxLen);
+    app = createValidApplication(null);
+    app.addComponent(createValidComponent(compName).uniqueComponentSupport(
+        unique));
+
+    // does not fail
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40545156/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index f4fecfd..e66a761 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -78,6 +78,11 @@ public interface RegistryConstants {
   String KEY_DNS_DOMAIN = DNS_PREFIX + "domain-name";
 
   /**
+   * Max length of a label (node delimited by a dot in the FQDN).
+   */
+  int MAX_FQDN_LABEL_LENGTH = 63;
+
+  /**
    * DNS bind address.
    */
   String KEY_DNS_BIND_ADDRESS = DNS_PREFIX + "bind-address";


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] [abbrv] hadoop git commit: YARN-6716. Native services support for specifying component start order. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ReportingLoop.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ReportingLoop.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ReportingLoop.java
deleted file mode 100644
index 096838d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ReportingLoop.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This is the monitor service
- */
-public final class ReportingLoop implements Runnable, ProbeReportHandler, MonitorKeys, Closeable {
-  protected static final Logger log = LoggerFactory.getLogger(ReportingLoop.class);
-  private final ProbeWorker worker;
-  private final Thread workerThread;
-  private final int reportInterval;
-  private final int probeTimeout;
-  private final int bootstrapTimeout;
-  private ProbeReportHandler reporter;
-  private final String name;
-  private volatile boolean mustExit;
-
-  public ReportingLoop(String name,
-                       ProbeReportHandler reporter,
-                       List<Probe> probes,
-                       List<Probe> dependencyProbes,
-                       int probeInterval,
-                       int reportInterval,
-                       int probeTimeout,
-                       int bootstrapTimeout) throws IOException {
-    this(name,
-         reporter,
-         new ProbeWorker(probes, dependencyProbes, probeInterval, bootstrapTimeout),
-         reportInterval,
-         probeTimeout);
-  }
-
-  /**
-   * Create a new reporting loop -and bond the worker's ProbeReportHandler
-   * to us
-   * @param name
-   * @param reporter
-   * @param worker
-   * @param reportInterval
-   * @param probeTimeout
-   */
-  public ReportingLoop(String name,
-                       ProbeReportHandler reporter,
-                       ProbeWorker worker,
-                       int reportInterval,
-                       int probeTimeout) throws IOException {
-    this.name = name;
-    this.reporter = reporter;
-    this.reportInterval = reportInterval;
-    this.probeTimeout = probeTimeout;
-    this.worker = worker;
-    this.bootstrapTimeout = worker.getBootstrapTimeout();
-    worker.setReportHandler(this);
-    workerThread = new Thread(worker, "probe thread - " + name);
-    worker.init();
-  }
-  
-  public int getBootstrapTimeout() {
-    return bootstrapTimeout;
-  }
-
-  public ReportingLoop withReporter(ProbeReportHandler reporter) {
-    assert this.reporter == null : "attempting to reassign reporter ";
-    assert reporter != null : "new reporter is null";
-    this.reporter = reporter;
-    return this;
-  }
-
-  /**
-   * Start the monitoring.
-   *
-   * @return false if the monitoring did not start and that the worker threads
-   *         should be run up.
-   */
-  public boolean startReporting() {
-    String description = "Service Monitor for " + name + ", probe-interval= "
-                         + MonitorUtils.millisToHumanTime(worker.interval)
-                         + ", report-interval=" + MonitorUtils.millisToHumanTime(reportInterval)
-                         + ", probe-timeout=" + timeoutToStr(probeTimeout)
-                         + ", bootstrap-timeout=" + timeoutToStr(bootstrapTimeout);
-    log.info("Starting reporting"
-             + " to " + reporter
-             + description);
-    return reporter.commence(name, description);
-  }
-
-  private String timeoutToStr(int timeout) {
-    return timeout >= 0 ? MonitorUtils.millisToHumanTime(timeout) : "not set";
-  }
-
-  private void startWorker() {
-    log.info("Starting reporting worker thread ");
-    workerThread.setDaemon(true);
-    workerThread.start();
-  }
-
-
-  /**
-   * This exits the process cleanly
-   */
-  @Override
-  public void close() {
-    log.info("Stopping reporting");
-    mustExit = true;
-    if (worker != null) {
-      worker.setMustExit();
-      workerThread.interrupt();
-    }
-    if (reporter != null) {
-      reporter.unregister();
-    }
-  }
-
-  @Override
-  public void probeFailure(ProbeFailedException exception) {
-    reporter.probeFailure(exception);
-  }
-
-  @Override
-  public void probeProcessStateChange(ProbePhase probePhase) {
-    reporter.probeProcessStateChange(probePhase);
-  }
-
-  @Override
-  public void probeBooted(ProbeStatus status) {
-    reporter.probeBooted(status);
-  }
-
-  private long now() {
-    return System.currentTimeMillis();
-  }
-
-  @Override
-  public void probeResult(ProbePhase phase, ProbeStatus status) {
-    reporter.probeResult(phase, status);
-  }
-
-  @Override
-  public boolean commence(String n, String description) {
-    return true;
-  }
-
-  @Override
-  public void unregister() {
-  }
-
-  @Override
-  public void heartbeat(ProbeStatus status) {
-  }
-
-  @Override
-  public void probeTimedOut(ProbePhase currentPhase, Probe probe, ProbeStatus lastStatus,
-      long currentTime) {
-  }
-
-  @Override
-  public void liveProbeCycleCompleted() {
-    //delegate to the reporter
-    reporter.liveProbeCycleCompleted();
-  }
-
-  /**
-   * The reporting loop
-   */
-  void reportingLoop() {
-
-    while (!mustExit) {
-      try {
-        ProbeStatus workerStatus = worker.getLastStatus();
-        long now = now();
-        long lastStatusIssued = workerStatus.getTimestamp();
-        long timeSinceLastStatusIssued = now - lastStatusIssued;
-        //two actions can occur here: a heartbeat is issued or a timeout reported. 
-        //this flag decides which
-        boolean heartbeat;
-
-        //based on phase, decide whether to heartbeat or timeout
-        ProbePhase probePhase = worker.getProbePhase();
-        switch (probePhase) {
-          case DEPENDENCY_CHECKING:
-            //no timeouts in dependency phase
-            heartbeat = true;
-            break;
-
-          case BOOTSTRAPPING:
-            //the timeout here is fairly straightforward: heartbeats are
-            //raised while the worker hasn't timed out
-            heartbeat = bootstrapTimeout < 0 || timeSinceLastStatusIssued < bootstrapTimeout;
-
-            break;
-
-          case LIVE:
-            //use the probe timeout interval between the current time
-            //and the time the last status event was received.
-            heartbeat = timeSinceLastStatusIssued < probeTimeout;
-            break;
-
-          case INIT:
-          case TERMINATING:
-          default:
-            //send a heartbeat, because this isn't the time to be failing
-            heartbeat = true;
-        }
-        if (heartbeat) {
-          //a heartbeat is sent to the reporter
-          reporter.heartbeat(workerStatus);
-        } else {
-          //no response from the worker -it is hung.
-          reporter.probeTimedOut(probePhase,
-                                 worker.getCurrentProbe(),
-                                 workerStatus,
-                                 now
-                                );
-        }
-
-        //now sleep
-        Thread.sleep(reportInterval);
-
-      } catch (InterruptedException e) {
-        //interrupted -always exit the loop.
-        break;
-      }
-    }
-    //this point is reached if and only if a clean exit was requested or something failed.
-  }
-
-  /**
-   * This can be run in a separate thread, or it can be run directly from the caller.
-   * Test runs do the latter, HAM runs multiple reporting threads.
-   */
-  @Override
-  public void run() {
-    try {
-      startWorker();
-      reportingLoop();
-    } catch (RuntimeException e) {
-      log.warn("Failure in the reporting loop: " + e, e);
-      //rethrow so that inline code can pick it up (e.g. test runs)
-      throw e;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
index 676db82..3033537 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
@@ -62,6 +62,10 @@ public interface RestApiErrorMessages {
       "Invalid no of containers specified";
   String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
       ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " +
+      "invalid, does not exist as a component";
+  String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " +
+      "exist: %s";
 
   String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED =
       "Cannot specify" + " cpus/memory along with profile";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
index 80a31c0..e977727 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
@@ -32,14 +32,18 @@ import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.SliderProviderFactory;
+import org.apache.slider.server.servicemonitor.MonitorUtils;
 import org.codehaus.jackson.map.PropertyNamingStrategy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 public class ServiceApiUtil {
@@ -176,9 +180,22 @@ public class ServiceApiUtil {
       if (comp.getLaunchCommand() == null) {
         comp.setLaunchCommand(globalLaunchCommand);
       }
+      // validate dependency existence
+      if (comp.getDependencies() != null) {
+        for (String dependency : comp.getDependencies()) {
+          if (!componentNames.contains(dependency)) {
+            throw new IllegalArgumentException(String.format(
+                RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency,
+                comp.getName()));
+          }
+        }
+      }
       validateComponent(comp, fs.getFileSystem());
     }
 
+    // validate dependency tree
+    sortByDependencies(application.getComponents());
+
     // Application lifetime if not specified, is set to unlimited lifetime
     if (application.getLifetime() == null) {
       application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
@@ -207,6 +224,8 @@ public class ServiceApiUtil {
     }
     compClientProvider.validateConfigFiles(comp.getConfiguration()
         .getFiles(), fs);
+
+    MonitorUtils.getProbe(comp.getReadinessCheck());
   }
 
   @VisibleForTesting
@@ -301,6 +320,67 @@ public class ServiceApiUtil {
     return comp;
   }
 
+  public static Collection<Component> sortByDependencies(List<Component>
+      components) {
+    Map<String, Component> sortedComponents =
+        sortByDependencies(components, null);
+    return sortedComponents.values();
+  }
+
+  /**
+   * Each internal call of sortByDependencies will identify all of the
+   * components with the same dependency depth (the lowest depth that has not
+   * been processed yet) and add them to the sortedComponents list, preserving
+   * their original ordering in the components list.
+   *
+   * So the first time it is called, all components with no dependencies
+   * (depth 0) will be identified. The next time it is called, all components
+   * that have dependencies only on the depth 0 components will be
+   * identified (depth 1). This will be repeated until all components have
+   * been added to the sortedComponents list. If no new components are
+   * identified but the sortedComponents list is not complete, an error is
+   * thrown.
+   */
+  private static Map<String, Component> sortByDependencies(List<Component>
+      components, Map<String, Component> sortedComponents) {
+    if (sortedComponents == null) {
+      sortedComponents = new LinkedHashMap<>();
+    }
+
+    Map<String, Component> componentsToAdd = new LinkedHashMap<>();
+    List<Component> componentsSkipped = new ArrayList<>();
+    for (Component component : components) {
+      String name = component.getName();
+      if (sortedComponents.containsKey(name)) {
+        continue;
+      }
+      boolean dependenciesAlreadySorted = true;
+      if (!SliderUtils.isEmpty(component.getDependencies())) {
+        for (String dependency : component.getDependencies()) {
+          if (!sortedComponents.containsKey(dependency)) {
+            dependenciesAlreadySorted = false;
+            break;
+          }
+        }
+      }
+      if (dependenciesAlreadySorted) {
+        componentsToAdd.put(name, component);
+      } else {
+        componentsSkipped.add(component);
+      }
+    }
+
+    if (componentsToAdd.size() == 0) {
+      throw new IllegalArgumentException(String.format(RestApiErrorMessages
+          .ERROR_DEPENDENCY_CYCLE, componentsSkipped));
+    }
+    sortedComponents.putAll(componentsToAdd);
+    if (sortedComponents.size() == components.size()) {
+      return sortedComponents;
+    }
+    return sortByDependencies(components, sortedComponents);
+  }
+
   public static String $(String s) {
     return "${" + s +"}";
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDependencies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDependencies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDependencies.java
new file mode 100644
index 0000000..2967309
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDependencies.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.servicemonitor.ProbeStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Test for postponing container requests until dependencies are ready.
+ */
+public class TestMockAppStateDependencies extends BaseMockAppStateTest
+    implements MockRoles {
+
+  private org.apache.slider.server.servicemonitor.Probe successProbe =
+      new org.apache.slider.server.servicemonitor.Probe("success", null) {
+        @Override
+        public ProbeStatus ping(RoleInstance roleInstance) {
+          ProbeStatus status = new ProbeStatus();
+          status.succeed(this);
+          return status;
+        }
+      };
+
+  private org.apache.slider.server.servicemonitor.Probe failureProbe =
+      new org.apache.slider.server.servicemonitor.Probe("failure", null) {
+        @Override
+        public ProbeStatus ping(RoleInstance roleInstance) {
+          ProbeStatus status = new ProbeStatus();
+          status.fail(this, new Exception());
+          return status;
+        }
+      };
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateDependencies";
+  }
+
+  @Test
+  public void testDependencies() throws Throwable {
+    RoleStatus role0Status = getRole0Status();
+    RoleStatus role1Status = getRole1Status();
+
+    // set desired instances for role0 to 1
+    role0Status.setDesired(1);
+    // set probe for role0 to use a ping that will always succeed
+    role0Status.getProviderRole().probe = successProbe;
+
+    // set desired instances for role1 to 1
+    role1Status.setDesired(1);
+    // set role0 as a dependency of role1
+    role1Status.getProviderRole().component.setDependencies(Collections
+        .singletonList(ROLE0));
+
+    // role0 has no dependencies, so its dependencies are ready
+    assertTrue(appState.areDependenciesReady(role0Status));
+    // role1 dependency (role0) is not ready yet
+    assertFalse(appState.areDependenciesReady(role1Status));
+    // start the single requested instance for role0
+    review(ROLE0, 2);
+
+    // role0 is still not ready because a ping has not been issued
+    assertFalse(appState.areDependenciesReady(role1Status));
+    // issue pings
+    appState.monitorComponentInstances();
+    // now role0 is ready
+    assertTrue(appState.areDependenciesReady(role1Status));
+    // increase the desired containers for role0
+    role0Status.setDesired(2);
+    // role0 is no longer ready
+    assertFalse(appState.areDependenciesReady(role1Status));
+    // start a second instance for role0
+    review(ROLE0, 2);
+
+    // role0 is not ready because ping has not been issued for the new instance
+    assertFalse(appState.areDependenciesReady(role1Status));
+    // issue pings
+    appState.monitorComponentInstances();
+    // role0 is ready
+    assertTrue(appState.areDependenciesReady(role1Status));
+
+    // set probe for role0 to use a ping that will always fail
+    role0Status.getProviderRole().probe = failureProbe;
+    // issue pings
+    appState.monitorComponentInstances();
+    // role0 is not ready (failure probe works)
+    assertFalse(appState.areDependenciesReady(role1Status));
+    // set probe for role0 to use a ping that will always succeed
+    role0Status.getProviderRole().probe = successProbe;
+    // issue pings
+    appState.monitorComponentInstances();
+    // role0 is ready
+    assertTrue(appState.areDependenciesReady(role1Status));
+
+    // now role1 instances can be started
+    review(ROLE1, 1);
+  }
+
+  public void review(String expectedRole, int outstanding) throws Exception {
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+
+    // expect one request in the list
+    assertEquals(1, ops.size());
+    // and in a liveness check, expected outstanding
+    ApplicationLivenessInformation liveness =
+        appState.getApplicationLivenessInformation();
+    assertEquals(outstanding, liveness.requestsOutstanding);
+    assertFalse(liveness.allRequestsSatisfied);
+
+    // record container allocated and verify it has the expected role
+    List<Container> allocations = engine.execute(ops);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> releases = new ArrayList<>();
+    appState.onContainersAllocated(allocations, assignments, releases);
+    assertEquals(1, assignments.size());
+    ContainerAssignment assigned = assignments.get(0);
+    Container target = assigned.container;
+    RoleInstance ri = roleInstance(assigned);
+    assertEquals(expectedRole, ri.role);
+
+    // one fewer request outstanding
+    liveness = appState.getApplicationLivenessInformation();
+    assertEquals(outstanding - 1, liveness.requestsOutstanding);
+
+    // record container start submitted
+    appState.containerStartSubmitted(target, ri);
+
+    // additional review results in no additional requests
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertTrue(ops.isEmpty());
+
+    // record container start
+    appState.innerOnNodeManagerContainerStarted(target.getId());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
index 703d65f..edc1866 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -103,8 +103,6 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
       assertEquals(i, instance.componentId);
       assertEquals(group, instance.role);
       assertEquals(group, instance.providerRole.name);
-      assertEquals(group, instance.providerRole.group);
-      // TODO remove group from provider role if it continues to be unused
       i++;
     }
   }
@@ -124,7 +122,6 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
     assertEquals(0, roleStatus.getDesired());
     assertEquals(1024L, roleStatus.getResourceRequirements().getMemorySize());
     assertEquals(2, roleStatus.getResourceRequirements().getVirtualCores());
-    assertEquals("group1", roleStatus.getGroup());
 
     // now flex back up
     appState.updateComponents(Collections.singletonMap("group1", 3L));
@@ -147,7 +144,6 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
     RoleStatus group1 = appState.lookupRoleStatus("group1");
     assertEquals(3, group1.getDesired());
     assertEquals(1024L, group1.getResourceRequirements().getMemorySize());
-    assertEquals("group1", group1.getGroup());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
index 7d8f5a7..555db75 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
@@ -343,7 +343,7 @@ public class TestRoleHistoryOutstandingRequestTracker extends
   public void testBuildResourceRequirements() throws Throwable {
     // Store original values
     Application application = appState.getClusterStatus();
-    Component role0 = application.getComponent(getRole0Status().getGroup());
+    Component role0 = application.getComponent(getRole0Status().getName());
     String origMem = role0.getResource().getMemory();
     Integer origVcores = role0.getResource().getCpus();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
index 458d1bc..dacfb0a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
@@ -17,20 +17,25 @@
 
 package org.apache.slider.server.servicemonitor;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.junit.Assert;
 import org.junit.Test;
 
 public class TestPortProbe extends Assert {
+  private final MockFactory factory = MockFactory.INSTANCE;
+
   /**
    * Assert that a port probe failed if the port is closed
    * @throws Throwable
    */
   @Test
   public void testPortProbeFailsClosedPort() throws Throwable {
-    PortProbe probe = new PortProbe("127.0.0.1", 65500, 100, "", new Configuration());
+    PortProbe probe = new PortProbe(65500, 100);
     probe.init();
-    ProbeStatus status = probe.ping(true);
+    RoleInstance roleInstance = new RoleInstance(factory.newContainer());
+    roleInstance.ip = "127.0.0.1";
+    ProbeStatus status = probe.ping(roleInstance);
     assertFalse("Expected a failure but got successful result: " + status,
       status.isSuccess());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
index 9ca3242..d7a9cfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
@@ -34,6 +34,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
 
 import static org.apache.slider.util.RestApiConstants.DEFAULT_COMPONENT_NAME;
 import static org.apache.slider.util.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
@@ -390,4 +393,52 @@ public class TestServiceApiUtil {
     // original component replaced by external component
     assertNotNull(app.getComponent("comp1"));
   }
+
+  public static void verifyDependencySorting(List<Component> components,
+      Component... expectedSorting) {
+    Collection<Component> actualSorting = ServiceApiUtil.sortByDependencies(
+        components);
+    assertEquals(expectedSorting.length, actualSorting.size());
+    int i = 0;
+    for (Component component : actualSorting) {
+      assertEquals(expectedSorting[i++], component);
+    }
+  }
+
+  @Test
+  public void testDependencySorting() throws IOException {
+    Component a = new Component().name("a");
+    Component b = new Component().name("b");
+    Component c = new Component().name("c");
+    Component d = new Component().name("d").dependencies(Arrays.asList("c"));
+    Component e = new Component().name("e").dependencies(Arrays.asList("b",
+        "d"));
+
+    verifyDependencySorting(Arrays.asList(a, b, c), a, b, c);
+    verifyDependencySorting(Arrays.asList(c, a, b), c, a, b);
+    verifyDependencySorting(Arrays.asList(a, b, c, d, e), a, b, c, d, e);
+    verifyDependencySorting(Arrays.asList(e, d, c, b, a), c, b, a, d, e);
+
+    c.setDependencies(Arrays.asList("e"));
+    try {
+      verifyDependencySorting(Arrays.asList(a, b, c, d, e));
+      Assert.fail(EXCEPTION_PREFIX + "components with dependency cycle");
+    } catch (IllegalArgumentException ex) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_DEPENDENCY_CYCLE, Arrays.asList(c, d,
+              e)), ex.getMessage());
+    }
+
+    SliderFileSystem sfs = initMock(null);
+    Application application = createValidApplication(null);
+    application.setComponents(Arrays.asList(c, d, e));
+    try {
+      ServiceApiUtil.validateAndResolveApplication(application, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies");
+    } catch (IllegalArgumentException ex) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, "b", "e"), ex
+          .getMessage());
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/50] [abbrv] hadoop git commit: YARN-6613. Update json validation for new native services providers. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
deleted file mode 100644
index bf6ee2c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.tools;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.utils.SliderTestBase;
-import org.junit.Test;
-
-import java.net.URI;
-
-/**
- * Test slider utils.
- */
-public class TestMiscSliderUtils extends SliderTestBase {
-
-
-  public static final String CLUSTER1 = "cluster1";
-
-  @Test
-  public void testPurgeTempDir() throws Throwable {
-
-    Configuration configuration = new Configuration();
-    FileSystem fs = FileSystem.get(new URI("file:///"), configuration);
-    SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
-    Path inst = sliderFileSystem.createAppInstanceTempPath(CLUSTER1, "001");
-
-    assertTrue(fs.exists(inst));
-    sliderFileSystem.purgeAppInstanceTempFiles(CLUSTER1);
-    assertFalse(fs.exists(inst));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleAppJson.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleAppJson.java
new file mode 100644
index 0000000..1700771
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleAppJson.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.slider.api.resource.Application;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
+
+/**
+ * Names of the example configs.
+ */
+public final class ExampleAppJson {
+
+  public static final String APP_JSON = "app.json";
+  public static final String OVERRIDE_JSON = "app-override.json";
+  public static final String DEFAULT_JSON = "default.json";
+  public static final String EXTERNAL_JSON_0 = "external0.json";
+  public static final String EXTERNAL_JSON_1 = "external1.json";
+  public static final String EXTERNAL_JSON_2 = "external2.json";
+
+  public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";
+
+
+  private static final String[] ALL_EXAMPLES = {APP_JSON, OVERRIDE_JSON,
+      DEFAULT_JSON};
+
+  public static final List<String> ALL_EXAMPLE_RESOURCES = new ArrayList<>();
+  static {
+    for (String example : ALL_EXAMPLES) {
+      ALL_EXAMPLE_RESOURCES.add(PACKAGE + example);
+    }
+  }
+
+  private ExampleAppJson() {
+  }
+
+  static Application loadResource(String name) throws IOException {
+    return JSON_SER_DESER.fromResource(PACKAGE + name);
+  }
+
+  public static String resourceName(String name) {
+    return "target/test-classes" + PACKAGE + name;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
deleted file mode 100644
index f13fbcc..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.conf;
-
-import org.apache.slider.api.resource.Application;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
-
-/**
- * Names of the example configs.
- */
-public final class ExampleConfResources {
-
-  public static final String APP_JSON = "app.json";
-  public static final String APP_RES = "app-resolved.json";
-  public static final String OVERRIDE_JSON = "app-override.json";
-  public static final String OVERRIDE_RES = "app-override-resolved.json";
-
-  public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";
-
-
-  private static final String[] ALL_EXAMPLES = {APP_JSON, APP_RES,
-      OVERRIDE_JSON, OVERRIDE_RES};
-
-  public static final List<String> ALL_EXAMPLE_RESOURCES = new ArrayList<>();
-  static {
-    for (String example : ALL_EXAMPLES) {
-      ALL_EXAMPLE_RESOURCES.add(PACKAGE + example);
-    }
-  }
-
-  private ExampleConfResources() {
-  }
-
-  static Application loadResource(String name) throws IOException {
-    return JSON_SER_DESER.fromResource(PACKAGE + name);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
deleted file mode 100644
index 48b0736..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.conf;
-
-import org.apache.slider.api.resource.Application;
-import org.apache.slider.common.tools.SliderUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
-
-/**
- * Test loading example resources.
- */
-@RunWith(value = Parameterized.class)
-public class TestConfTreeLoadExamples extends Assert {
-  private String resource;
-
-  public TestConfTreeLoadExamples(String resource) {
-    this.resource = resource;
-  }
-
-  @Parameterized.Parameters
-  public static Collection<String[]> filenames() {
-    String[][] stringArray = new String[ExampleConfResources
-        .ALL_EXAMPLE_RESOURCES.size()][1];
-    int i = 0;
-    for (String s : ExampleConfResources.ALL_EXAMPLE_RESOURCES) {
-      stringArray[i++][0] = s;
-    }
-    return Arrays.asList(stringArray);
-  }
-
-  @Test
-  public void testLoadResource() throws Throwable {
-    try {
-      Application application = JSON_SER_DESER.fromResource(resource);
-      SliderUtils.resolve(application);
-    } catch (Exception e) {
-      throw new Exception("exception loading " + resource + ":" + e.toString());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
index 285ddfa..5f5df70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
@@ -18,20 +18,40 @@
 
 package org.apache.slider.core.conf;
 
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.api.resource.ConfigFile.TypeEnum;
 import org.apache.slider.api.resource.Configuration;
+import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.util.ServiceApiUtil;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
 import static org.apache.slider.api.InternalKeys.CHAOS_MONKEY_INTERVAL;
 import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS;
 import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS;
 import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES;
-import static org.apache.slider.core.conf.ExampleConfResources.APP_JSON;
-import static org.apache.slider.core.conf.ExampleConfResources.OVERRIDE_JSON;
+import static org.apache.slider.core.conf.ExampleAppJson.APP_JSON;
+import static org.apache.slider.core.conf.ExampleAppJson.EXTERNAL_JSON_1;
+import static org.apache.slider.core.conf.ExampleAppJson.OVERRIDE_JSON;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
 
 /**
  * Test global configuration resolution.
@@ -42,23 +62,26 @@ public class TestConfigurationResolve extends Assert {
 
   @Test
   public void testOverride() throws Throwable {
-
-    Application orig = ExampleConfResources.loadResource(OVERRIDE_JSON);
+    Application orig = ExampleAppJson.loadResource(OVERRIDE_JSON);
 
     Configuration global = orig.getConfiguration();
     assertEquals("a", global.getProperty("g1"));
     assertEquals("b", global.getProperty("g2"));
+    assertEquals(2, global.getFiles().size());
 
     Configuration simple = orig.getComponent("simple").getConfiguration();
     assertEquals(0, simple.getProperties().size());
+    assertEquals(1, simple.getFiles().size());
 
     Configuration master = orig.getComponent("master").getConfiguration();
     assertEquals("m", master.getProperty("name"));
     assertEquals("overridden", master.getProperty("g1"));
+    assertEquals(0, master.getFiles().size());
 
     Configuration worker = orig.getComponent("worker").getConfiguration();
     LOG.info("worker = {}", worker);
     assertEquals(3, worker.getProperties().size());
+    assertEquals(0, worker.getFiles().size());
 
     assertEquals("worker", worker.getProperty("name"));
     assertEquals("overridden-by-worker", worker.getProperty("g1"));
@@ -66,18 +89,36 @@ public class TestConfigurationResolve extends Assert {
     assertEquals("1000", worker.getProperty("timeout"));
 
     // here is the resolution
-    SliderUtils.resolve(orig);
+    SliderFileSystem sfs = createNiceMock(SliderFileSystem.class);
+    FileSystem mockFs = createNiceMock(FileSystem.class);
+    expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
+    expect(sfs.buildClusterDirPath(anyObject())).andReturn(
+        new Path("cluster_dir_path")).anyTimes();
+    replay(sfs, mockFs);
+    ServiceApiUtil.validateAndResolveApplication(orig, sfs);
 
     global = orig.getConfiguration();
     LOG.info("global = {}", global);
     assertEquals("a", global.getProperty("g1"));
     assertEquals("b", global.getProperty("g2"));
+    assertEquals(2, global.getFiles().size());
 
     simple = orig.getComponent("simple").getConfiguration();
     assertEquals(2, simple.getProperties().size());
     assertEquals("a", simple.getProperty("g1"));
     assertEquals("b", simple.getProperty("g2"));
-
+    assertEquals(2, simple.getFiles().size());
+
+    Set<ConfigFile> files = new HashSet<>();
+    Map<String, String> props = new HashMap<>();
+    props.put("k1", "overridden");
+    props.put("k2", "v2");
+    files.add(new ConfigFile().destFile("file1").type(TypeEnum
+        .PROPERTIES).props(props));
+    files.add(new ConfigFile().destFile("file2").type(TypeEnum
+        .XML).props(Collections.singletonMap("k3", "v3")));
+    assertTrue(files.contains(simple.getFiles().get(0)));
+    assertTrue(files.contains(simple.getFiles().get(1)));
 
     master = orig.getComponent("master").getConfiguration();
     LOG.info("master = {}", master);
@@ -85,6 +126,17 @@ public class TestConfigurationResolve extends Assert {
     assertEquals("m", master.getProperty("name"));
     assertEquals("overridden", master.getProperty("g1"));
     assertEquals("b", master.getProperty("g2"));
+    assertEquals(2, master.getFiles().size());
+
+    props.put("k1", "v1");
+    files.clear();
+    files.add(new ConfigFile().destFile("file1").type(TypeEnum
+        .PROPERTIES).props(props));
+    files.add(new ConfigFile().destFile("file2").type(TypeEnum
+        .XML).props(Collections.singletonMap("k3", "v3")));
+
+    assertTrue(files.contains(master.getFiles().get(0)));
+    assertTrue(files.contains(master.getFiles().get(1)));
 
     worker = orig.getComponent("worker").getConfiguration();
     LOG.info("worker = {}", worker);
@@ -94,13 +146,91 @@ public class TestConfigurationResolve extends Assert {
     assertEquals("overridden-by-worker", worker.getProperty("g1"));
     assertEquals("b", worker.getProperty("g2"));
     assertEquals("1000", worker.getProperty("timeout"));
+    assertEquals(2, worker.getFiles().size());
 
+    assertTrue(files.contains(worker.getFiles().get(0)));
+    assertTrue(files.contains(worker.getFiles().get(1)));
   }
 
   @Test
-  public void testTimeIntervalLoading() throws Throwable {
+  public void testOverrideExternalConfiguration() throws IOException {
+    Application orig = ExampleAppJson.loadResource(EXTERNAL_JSON_1);
+
+    Configuration global = orig.getConfiguration();
+    assertEquals(0, global.getProperties().size());
+
+    assertEquals(3, orig.getComponents().size());
 
-    Application orig = ExampleConfResources.loadResource(APP_JSON);
+    Configuration simple = orig.getComponent("simple").getConfiguration();
+    assertEquals(0, simple.getProperties().size());
+
+    Configuration master = orig.getComponent("master").getConfiguration();
+    assertEquals(1, master.getProperties().size());
+    assertEquals("is-overridden", master.getProperty("g3"));
+
+    Configuration other = orig.getComponent("other").getConfiguration();
+    assertEquals(0, other.getProperties().size());
+
+    // load the external application
+    SliderFileSystem sfs = createNiceMock(SliderFileSystem.class);
+    FileSystem mockFs = createNiceMock(FileSystem.class);
+    expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
+    expect(sfs.buildClusterDirPath(anyObject())).andReturn(
+        new Path("cluster_dir_path")).anyTimes();
+    replay(sfs, mockFs);
+    Application ext = ExampleAppJson.loadResource(APP_JSON);
+    ServiceApiUtil.validateAndResolveApplication(ext, sfs);
+    reset(sfs, mockFs);
+
+    // perform the resolution on original application
+    JsonSerDeser<Application> jsonSerDeser = createNiceMock(JsonSerDeser
+        .class);
+    expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
+    expect(sfs.buildClusterDirPath(anyObject())).andReturn(
+        new Path("cluster_dir_path")).anyTimes();
+    expect(jsonSerDeser.load(anyObject(), anyObject())).andReturn(ext)
+        .anyTimes();
+    replay(sfs, mockFs, jsonSerDeser);
+    ServiceApiUtil.setJsonSerDeser(jsonSerDeser);
+    ServiceApiUtil.validateAndResolveApplication(orig, sfs);
+
+    global = orig.getConfiguration();
+    assertEquals(0, global.getProperties().size());
+
+    assertEquals(4, orig.getComponents().size());
+
+    simple = orig.getComponent("simple").getConfiguration();
+    assertEquals(3, simple.getProperties().size());
+    assertEquals("a", simple.getProperty("g1"));
+    assertEquals("b", simple.getProperty("g2"));
+    assertEquals("60",
+        simple.getProperty("internal.chaos.monkey.interval.seconds"));
+
+    master = orig.getComponent("master").getConfiguration();
+    assertEquals(5, master.getProperties().size());
+    assertEquals("512M", master.getProperty("jvm.heapsize"));
+    assertEquals("overridden", master.getProperty("g1"));
+    assertEquals("b", master.getProperty("g2"));
+    assertEquals("is-overridden", master.getProperty("g3"));
+    assertEquals("60",
+        simple.getProperty("internal.chaos.monkey.interval.seconds"));
+
+    Configuration worker = orig.getComponent("worker").getConfiguration();
+    LOG.info("worker = {}", worker);
+    assertEquals(4, worker.getProperties().size());
+    assertEquals("512M", worker.getProperty("jvm.heapsize"));
+    assertEquals("overridden-by-worker", worker.getProperty("g1"));
+    assertEquals("b", worker.getProperty("g2"));
+    assertEquals("60",
+        worker.getProperty("internal.chaos.monkey.interval.seconds"));
+
+    other = orig.getComponent("other").getConfiguration();
+    assertEquals(0, other.getProperties().size());
+  }
+
+  @Test
+  public void testTimeIntervalLoading() throws Throwable {
+    Application orig = ExampleAppJson.loadResource(APP_JSON);
 
     Configuration conf = orig.getConfiguration();
     long s = conf.getPropertyLong(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
new file mode 100644
index 0000000..09096d0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestExampleAppJson.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.util.ServiceApiUtil;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
+/**
+ * Test loading example resources.
+ */
+@RunWith(value = Parameterized.class)
+public class TestExampleAppJson extends Assert {
+  private String resource;
+
+  public TestExampleAppJson(String resource) {
+    this.resource = resource;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<String[]> filenames() {
+    String[][] stringArray = new String[ExampleAppJson
+        .ALL_EXAMPLE_RESOURCES.size()][1];
+    int i = 0;
+    for (String s : ExampleAppJson.ALL_EXAMPLE_RESOURCES) {
+      stringArray[i++][0] = s;
+    }
+    return Arrays.asList(stringArray);
+  }
+
+  @Test
+  public void testLoadResource() throws Throwable {
+    try {
+      Application application = JSON_SER_DESER.fromResource(resource);
+
+      SliderFileSystem sfs = createNiceMock(SliderFileSystem.class);
+      FileSystem mockFs = createNiceMock(FileSystem.class);
+      expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
+      expect(sfs.buildClusterDirPath(anyObject())).andReturn(
+          new Path("cluster_dir_path")).anyTimes();
+      replay(sfs, mockFs);
+
+      ServiceApiUtil.validateAndResolveApplication(application, sfs);
+    } catch (Exception e) {
+      throw new Exception("exception loading " + resource + ":" + e.toString());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestAbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestAbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestAbstractClientProvider.java
new file mode 100644
index 0000000..162d34c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestAbstractClientProvider.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.api.resource.ConfigFile.TypeEnum;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+
+/**
+ * Test the AbstractClientProvider shared methods.
+ */
+public class TestAbstractClientProvider {
+  private static final String EXCEPTION_PREFIX = "Should have thrown " +
+      "exception: ";
+  private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " +
+      "exception: ";
+
+  private static class ClientProvider extends AbstractClientProvider {
+    @Override
+    public void validateArtifact(Artifact artifact, FileSystem fileSystem)
+        throws IOException {
+    }
+
+    @Override
+    protected void validateConfigFile(ConfigFile configFile,
+        FileSystem fileSystem) throws IOException {
+    }
+  }
+
+  @Test
+  public void testConfigFiles() throws IOException {
+    ClientProvider clientProvider = new ClientProvider();
+    FileSystem mockFs = createNiceMock(FileSystem.class);
+    expect(mockFs.exists(anyObject(Path.class))).andReturn(true).anyTimes();
+    replay(mockFs);
+
+    ConfigFile configFile = new ConfigFile();
+    List<ConfigFile> configFiles = new ArrayList<>();
+    configFiles.add(configFile);
+
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "null file type");
+    } catch (IllegalArgumentException e) {
+    }
+
+    configFile.setType(TypeEnum.TEMPLATE);
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "empty src_file for type template");
+    } catch (IllegalArgumentException e) {
+    }
+
+    configFile.setSrcFile("srcfile");
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "empty dest file");
+    } catch (IllegalArgumentException e) {
+    }
+
+    configFile.setDestFile("destfile");
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    configFile = new ConfigFile();
+    configFile.setType(TypeEnum.JSON);
+    configFile.setSrcFile(null);
+    configFile.setDestFile("path/destfile2");
+    configFiles.add(configFile);
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "dest file with multiple path elements");
+    } catch (IllegalArgumentException e) {
+    }
+
+    configFile.setDestFile("/path/destfile2");
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    configFile.setDestFile("destfile");
+    try {
+      clientProvider.validateConfigFiles(configFiles, mockFs);
+      Assert.fail(EXCEPTION_PREFIX + "duplicate dest file");
+    } catch (IllegalArgumentException e) {
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestBuildApplicationComponent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestBuildApplicationComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestBuildApplicationComponent.java
new file mode 100644
index 0000000..6df660d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestBuildApplicationComponent.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.core.conf.ExampleAppJson;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.util.ServiceApiUtil;
+import org.apache.slider.utils.YarnZKMiniClusterTestBase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.slider.common.params.Arguments.ARG_APPDEF;
+
+/**
+ * Test for building / resolving components of type APPLICATION.
+ */
+public class TestBuildApplicationComponent extends YarnZKMiniClusterTestBase {
+
+  private static void checkComponentNames(List<Component> components,
+      Set<String> names) {
+    assertEquals(names.size(), components.size());
+    for (Component comp : components) {
+      assertTrue(names.contains(comp.getName()));
+    }
+  }
+
+  public void buildAndCheckComponents(String appName, String appDef,
+      SliderFileSystem sfs, Set<String> names) throws Throwable {
+    ServiceLauncher<SliderClient> launcher = createOrBuildCluster(
+        SliderActions.ACTION_BUILD, appName, Arrays.asList(ARG_APPDEF,
+            ExampleAppJson.resourceName(appDef)), true, false);
+    SliderClient sliderClient = launcher.getService();
+    addToTeardown(sliderClient);
+
+    // verify the cluster exists
+    assertEquals(0, sliderClient.actionExists(appName, false));
+    // verify generated conf
+    List<Component> components = ServiceApiUtil.getApplicationComponents(sfs,
+        appName);
+    checkComponentNames(components, names);
+  }
+
+  @Test
+  public void testExternalComponentBuild() throws Throwable {
+    String clustername = createMiniCluster("", getConfiguration(), 1, true);
+
+    describe("verify external components");
+
+    SliderFileSystem sfs = createSliderFileSystem();
+
+    Set<String> nameSet = new HashSet<>();
+    nameSet.add("simple");
+    nameSet.add("master");
+    nameSet.add("worker");
+
+    buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs,
+        nameSet);
+    buildAndCheckComponents("external-0", ExampleAppJson
+            .EXTERNAL_JSON_0, sfs, nameSet);
+
+    nameSet.add("other");
+
+    buildAndCheckComponents("external-1", ExampleAppJson
+        .EXTERNAL_JSON_1, sfs, nameSet);
+
+    nameSet.add("another");
+
+    buildAndCheckComponents("external-2", ExampleAppJson
+        .EXTERNAL_JSON_2, sfs, nameSet);
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestDefaultProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestDefaultProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestDefaultProvider.java
new file mode 100644
index 0000000..f1afe67
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestDefaultProvider.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.conf.ExampleAppJson;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.utils.YarnZKMiniClusterTestBase;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.apache.slider.common.params.Arguments.ARG_APPDEF;
+
+/**
+ * Simple end-to-end test.
+ */
+public class TestDefaultProvider  extends YarnZKMiniClusterTestBase {
+
+  // TODO figure out how to run client commands against minicluster
+  // (currently errors out unable to find containing jar of AM for upload)
+  @Ignore
+  @Test
+  public void testDefaultProvider() throws Throwable {
+    createMiniCluster("", getConfiguration(), 1, true);
+    String appName = "default-1";
+
+    describe("verify default provider");
+
+    String appDef = ExampleAppJson.resourceName(ExampleAppJson
+        .DEFAULT_JSON);
+
+    ServiceLauncher<SliderClient> launcher = createOrBuildCluster(
+        SliderActions.ACTION_CREATE, appName, Arrays.asList(ARG_APPDEF,
+            appDef), true, true);
+    SliderClient sliderClient = launcher.getService();
+    addToTeardown(sliderClient);
+
+    Application application = sliderClient.actionStatus(appName);
+    assertEquals(1L, application.getContainers().size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
index c1f2886..6f4ca42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
@@ -43,7 +43,7 @@ public class BaseMockAppStateAATest extends BaseMockAppStateTest
   @Override
   public Application buildApplication() {
     Application application = factory.newApplication(0, 0, 0)
-        .name(getTestName());
+        .name(getValidTestName());
     application.getComponent(ROLE1).getConfiguration().setProperty(
         COMPONENT_PLACEMENT_POLICY, Integer.toString(PlacementPolicy
             .ANTI_AFFINITY_REQUIRED));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
index eb25b40..571e9d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
@@ -362,7 +362,7 @@ public class TestMockAppStateAAPlacement extends BaseMockAppStateAATest
     // now destroy the app state
     AppStateBindingInfo bindingInfo = buildBindingInfo();
     bindingInfo.application = factory.newApplication(0, 0, desiredAA).name(
-        getTestName());
+        getValidTestName());
     bindingInfo.application.getComponent(ROLE2)
         .getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
         Integer.toString(PlacementPolicy.ANTI_AFFINITY_REQUIRED));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
index ea0dcf4..9cbda4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
@@ -203,7 +203,7 @@ public class TestMockAppStateContainerFailure extends BaseMockAppStateTest
     // Update instance definition to allow containers to fail any number of
     // times
     AppStateBindingInfo bindingInfo = buildBindingInfo();
-    bindingInfo.application.getConfiguration().setProperty(
+    bindingInfo.application.getComponent(ROLE0).getConfiguration().setProperty(
         ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "0");
     appState = new MockAppState(bindingInfo);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
index 6d8e963..7f7f93a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
@@ -38,6 +38,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Collections;
 
 /**
@@ -65,7 +66,7 @@ public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
   }
 
   @Override
-  public AppStateBindingInfo buildBindingInfo() {
+  public AppStateBindingInfo buildBindingInfo() throws IOException {
     AppStateBindingInfo bindingInfo = super.buildBindingInfo();
     bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
     return bindingInfo;
@@ -145,7 +146,7 @@ public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
     appState = new MockAppState();
     AppStateBindingInfo binding2 = buildBindingInfo();
     binding2.application = factory.newApplication(0, 0, 0)
-        .name(getTestName());
+        .name(getValidTestName());
     binding2.historyPath = historyPath2;
     appState.buildInstance(binding2);
     // on this read there won't be the right number of roles

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
index b0634bf..d9c675d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
@@ -70,7 +70,7 @@ public class TestMockAppStateRebuildOnAMRestart extends BaseMockAppStateTest
 
     AppStateBindingInfo bindingInfo = buildBindingInfo();
     bindingInfo.application = factory.newApplication(r0, r1, r2)
-        .name(getTestName());
+        .name(getValidTestName());
     bindingInfo.liveContainers = containers;
     appState = new MockAppState(bindingInfo);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
index b7e967f..703d65f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -30,6 +30,7 @@ import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.junit.Test;
 
+import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -59,7 +60,7 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
   }
 
   @Override
-  public AppStateBindingInfo buildBindingInfo() {
+  public AppStateBindingInfo buildBindingInfo() throws IOException {
     AppStateBindingInfo bindingInfo = super.buildBindingInfo();
     bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
     return bindingInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
index d382c8a..4aa5895 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
@@ -40,7 +40,7 @@ public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {
 
   @Override
   public Application buildApplication() {
-    return factory.newApplication(1, 0, 0).name(getTestName());
+    return factory.newApplication(1, 0, 0).name(getValidTestName());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
index 69abccf..5af87f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
@@ -51,6 +51,7 @@ import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.util.ServiceApiUtil;
 import org.apache.slider.utils.SliderTestBase;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,6 +63,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map.Entry;
 
 /**
@@ -118,7 +120,7 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
     historyPath = new Path(historyWorkDir.toURI());
     fs.delete(historyPath, true);
     appState = new MockAppState(buildBindingInfo());
-    stateAccess = new ProviderAppState(getTestName(), appState);
+    stateAccess = new ProviderAppState(getValidTestName(), appState);
   }
 
   /**
@@ -127,9 +129,11 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
    * from {@link #buildApplication()} ()}
    * @return
    */
-  protected AppStateBindingInfo buildBindingInfo() {
+  protected AppStateBindingInfo buildBindingInfo() throws IOException {
     AppStateBindingInfo binding = new AppStateBindingInfo();
     binding.application = buildApplication();
+    ServiceApiUtil.validateAndResolveApplication(binding.application,
+        sliderFileSystem);
     //binding.roles = new ArrayList<>(factory.ROLES);
     binding.fs = fs;
     binding.historyPath = historyPath;
@@ -142,7 +146,7 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
    * @return the instance definition
    */
   public Application buildApplication() {
-    return factory.newApplication(0, 0, 0).name(getTestName());
+    return factory.newApplication(0, 0, 0).name(getValidTestName());
   }
 
   /**
@@ -153,6 +157,10 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
     return methodName.getMethodName();
   }
 
+  public String getValidTestName() {
+    return getTestName().toLowerCase(Locale.ENGLISH);
+  }
+
   public RoleStatus getRole0Status() {
     return lookupRole(ROLE0);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
index 2ac5087..8785b92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Resource;
 import org.apache.slider.providers.PlacementPolicy;
 import org.apache.slider.providers.ProviderRole;
 
@@ -190,6 +191,8 @@ public class MockFactory implements MockRoles {
    */
   public Application newApplication(long r1, long r2, long r3) {
     Application application = new Application();
+    application.setLaunchCommand("sleep 60");
+    application.setResource(new Resource().memory("256"));
     application.getConfiguration().setProperty(ResourceKeys
         .NODE_FAILURE_THRESHOLD, Integer.toString(NODE_FAILURE_THRESHOLD));
     List<Component> components = application.getComponents();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
new file mode 100644
index 0000000..9ca3242
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.utils;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Resource;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.util.RestApiConstants;
+import org.apache.slider.util.RestApiErrorMessages;
+import org.apache.slider.util.ServiceApiUtil;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import static org.apache.slider.util.RestApiConstants.DEFAULT_COMPONENT_NAME;
+import static org.apache.slider.util.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME;
+import static org.apache.slider.util.RestApiErrorMessages.*;
+import static org.apache.slider.util.RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID;
+import static org.apache.slider.util.RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Test for ServiceApiUtil helper methods.
+ */
+public class TestServiceApiUtil {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestServiceApiUtil.class);
+  private static final String EXCEPTION_PREFIX = "Should have thrown " +
+      "exception: ";
+  private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " +
+      "exception: ";
+
+  @Test(timeout = 90000)
+  public void testResourceValidation() throws Exception {
+    SliderFileSystem sfs = initMock(null);
+
+    Application app = new Application();
+
+    // no name
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no name");
+    } catch (IllegalArgumentException e) {
+      assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
+    }
+
+    // bad format name
+    String[] badNames = {"4finance", "Finance", "finance@home"};
+    for (String badName : badNames) {
+      app.setName(badName);
+      try {
+        ServiceApiUtil.validateAndResolveApplication(app, sfs);
+        Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
+      } catch (IllegalArgumentException e) {
+        assertEquals(String.format(
+            ERROR_APPLICATION_NAME_INVALID_FORMAT, badName), e.getMessage());
+      }
+    }
+
+    // launch command not specified
+    app.setName("finance_home");
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no launch command");
+    } catch (IllegalArgumentException e) {
+      assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND,
+          e.getMessage());
+    }
+
+    // resource not specified
+    app.setLaunchCommand("sleep 3600");
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no resource");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID,
+          RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage());
+    }
+
+    // memory not specified
+    Resource res = new Resource();
+    app.setResource(res);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no memory");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID,
+          RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage());
+    }
+
+    // invalid no of cpus
+    res.setMemory("100mb");
+    res.setCpus(-2);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(
+          EXCEPTION_PREFIX + "application with invalid no of cpus");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(
+          RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE,
+          RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage());
+    }
+
+    // number of containers not specified
+    res.setCpus(2);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no container count");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue(e.getMessage()
+          .contains(ERROR_CONTAINERS_COUNT_INVALID));
+    }
+
+    // specifying profile along with cpus/memory raises exception
+    res.setProfile("hbase_finance_large");
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX
+          + "application with resource profile along with cpus/memory");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format(RestApiErrorMessages
+              .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
+          RestApiConstants.DEFAULT_COMPONENT_NAME),
+          e.getMessage());
+    }
+
+    // currently resource profile alone is not supported.
+    // TODO: remove the next test once resource profile alone is supported.
+    res.setCpus(null);
+    res.setMemory(null);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with resource profile only");
+    } catch (IllegalArgumentException e) {
+      assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET,
+          e.getMessage());
+    }
+
+    // unset profile here and add cpus/memory back
+    res.setProfile(null);
+    res.setCpus(2);
+    res.setMemory("2gb");
+
+    // null number of containers
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "null number of containers");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue(e.getMessage()
+          .startsWith(ERROR_CONTAINERS_COUNT_INVALID));
+    }
+
+    // negative number of containers
+    app.setNumberOfContainers(-1L);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "negative number of containers");
+    } catch (IllegalArgumentException e) {
+      Assert.assertTrue(e.getMessage()
+          .startsWith(ERROR_CONTAINERS_COUNT_INVALID));
+    }
+
+    // everything valid here
+    app.setNumberOfContainers(5L);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      LOG.error("application attributes specified should be valid here", e);
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+  }
+
+  @Test
+  public void testArtifacts() throws IOException {
+    SliderFileSystem sfs = initMock(null);
+
+    Application app = new Application();
+    app.setName("name");
+    Resource res = new Resource();
+    app.setResource(res);
+    res.setMemory("512M");
+    app.setNumberOfContainers(3L);
+
+    // no artifact id fails with default type
+    Artifact artifact = new Artifact();
+    app.setArtifact(artifact);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
+    } catch (IllegalArgumentException e) {
+      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
+    }
+
+    // no artifact id fails with APPLICATION type
+    artifact.setType(Artifact.TypeEnum.APPLICATION);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
+    } catch (IllegalArgumentException e) {
+      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
+    }
+
+    // no artifact id fails with TARBALL type
+    artifact.setType(Artifact.TypeEnum.TARBALL);
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
+    } catch (IllegalArgumentException e) {
+      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
+    }
+
+    // everything valid here
+    artifact.setType(Artifact.TypeEnum.DOCKER);
+    artifact.setId("docker.io/centos:centos7");
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      LOG.error("application attributes specified should be valid here", e);
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    // defaults assigned
+    assertEquals(app.getComponents().get(0).getName(),
+        DEFAULT_COMPONENT_NAME);
+    assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME);
+  }
+
+  private static Resource createValidResource() {
+    Resource res = new Resource();
+    res.setMemory("512M");
+    return res;
+  }
+
+  private static Component createValidComponent(String compName) {
+    Component comp = new Component();
+    comp.setName(compName);
+    comp.setResource(createValidResource());
+    comp.setNumberOfContainers(1L);
+    return comp;
+  }
+
+  private static Application createValidApplication(String compName) {
+    Application app = new Application();
+    app.setLaunchCommand("sleep 3600");
+    app.setName("name");
+    app.setResource(createValidResource());
+    app.setNumberOfContainers(1L);
+    if (compName != null) {
+      app.addComponent(createValidComponent(compName));
+    }
+    return app;
+  }
+
+  private static SliderFileSystem initMock(Application ext) throws IOException {
+    SliderFileSystem sfs = createNiceMock(SliderFileSystem.class);
+    FileSystem mockFs = createNiceMock(FileSystem.class);
+    JsonSerDeser<Application> jsonSerDeser = createNiceMock(JsonSerDeser
+        .class);
+    expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes();
+    expect(sfs.buildClusterDirPath(anyObject())).andReturn(
+        new Path("cluster_dir_path")).anyTimes();
+    if (ext != null) {
+      expect(jsonSerDeser.load(anyObject(), anyObject())).andReturn(ext)
+          .anyTimes();
+    }
+    replay(sfs, mockFs, jsonSerDeser);
+    ServiceApiUtil.setJsonSerDeser(jsonSerDeser);
+    return sfs;
+  }
+
+  @Test
+  public void testExternalApplication() throws IOException {
+    Application ext = createValidApplication("comp1");
+    SliderFileSystem sfs = initMock(ext);
+
+    Application app = createValidApplication(null);
+
+    Artifact artifact = new Artifact();
+    artifact.setType(Artifact.TypeEnum.APPLICATION);
+    artifact.setId("id");
+    app.setArtifact(artifact);
+
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    assertEquals(1, app.getComponents().size());
+    assertNotNull(app.getComponent("comp1"));
+  }
+
+  @Test
+  public void testDuplicateComponents() throws IOException {
+    SliderFileSystem sfs = initMock(null);
+
+    String compName = "comp1";
+    Application app = createValidApplication(compName);
+    app.addComponent(createValidComponent(compName));
+
+    // duplicate component name fails
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+      Assert.fail(EXCEPTION_PREFIX + "application with component collision");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Component name collision: " + compName, e.getMessage());
+    }
+  }
+
+  @Test
+  public void testExternalDuplicateComponent() throws IOException {
+    Application ext = createValidApplication("comp1");
+    SliderFileSystem sfs = initMock(ext);
+
+    Application app = createValidApplication("comp1");
+    Artifact artifact = new Artifact();
+    artifact.setType(Artifact.TypeEnum.APPLICATION);
+    artifact.setId("id");
+    app.getComponent("comp1").setArtifact(artifact);
+
+    // duplicate component name okay in the case of APPLICATION component
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+  }
+
+  @Test
+  public void testExternalComponent() throws IOException {
+    Application ext = createValidApplication("comp1");
+    SliderFileSystem sfs = initMock(ext);
+
+    Application app = createValidApplication("comp2");
+    Artifact artifact = new Artifact();
+    artifact.setType(Artifact.TypeEnum.APPLICATION);
+    artifact.setId("id");
+    app.setArtifact(artifact);
+
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    assertEquals(1, app.getComponents().size());
+    // artifact ID not inherited from global
+    assertNotNull(app.getComponent("comp2"));
+
+    // set APPLICATION artifact id on component
+    app.getComponent("comp2").setArtifact(artifact);
+
+    try {
+      ServiceApiUtil.validateAndResolveApplication(app, sfs);
+    } catch (IllegalArgumentException e) {
+      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
+    }
+
+    assertEquals(1, app.getComponents().size());
+    // original component replaced by external component
+    assertNotNull(app.getComponent("comp1"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
index 746a0ec..5e62fc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
@@ -37,6 +37,7 @@ import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.common.params.ActionFreezeArgs;
 import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
 import org.apache.slider.common.tools.Duration;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
@@ -328,11 +329,8 @@ public abstract class YarnMiniClusterTestBase extends SliderTestBase {
    */
   public void stopRunningClusters() {
     for (SliderClient client : clustersToTeardown) {
-      try {
-        maybeStopCluster(client, "", "Teardown at end of test case", true);
-      } catch (Exception e) {
-        LOG.warn("While stopping cluster " + e, e);
-      }
+      maybeStopCluster(client, client.getDeployedClusterName(),
+          "Teardown at end of test case", true);
     }
   }
 
@@ -502,6 +500,62 @@ public abstract class YarnMiniClusterTestBase extends SliderTestBase {
   }
 
   /**
+   * Create or build a cluster (the action is set by the first verb).
+   * @param action operation to invoke: SliderActions.ACTION_CREATE or
+   *               SliderActions.ACTION_BUILD
+   * @param clustername cluster name
+   * @param extraArgs list of extra args to add to the creation command
+   * @param deleteExistingData should the data of any existing cluster
+   * of this name be deleted
+   * @param blockUntilRunning block until the AM is running
+   * @return launcher which will have executed the command.
+   */
+  public ServiceLauncher<SliderClient> createOrBuildCluster(String action,
+      String clustername, List<String> extraArgs, boolean deleteExistingData,
+      boolean blockUntilRunning) throws Throwable {
+    assertNotNull(clustername);
+    assertNotNull(miniCluster);
+    // update action should keep existing data
+    Configuration config = miniCluster.getConfig();
+    if (deleteExistingData && !SliderActions.ACTION_UPDATE.equals(action)) {
+      FileSystem dfs = FileSystem.get(new URI(getFsDefaultName()), config);
+
+      SliderFileSystem sliderFileSystem = new SliderFileSystem(dfs, config);
+      Path clusterDir = sliderFileSystem.buildClusterDirPath(clustername);
+      LOG.info("deleting instance data at {}", clusterDir);
+      //this is a safety check to stop us doing something stupid like deleting /
+      assertTrue(clusterDir.toString().contains("/.slider/"));
+      rigorousDelete(sliderFileSystem, clusterDir, 60000);
+    }
+
+
+    List<String> argsList = new ArrayList<>();
+    argsList.addAll(Arrays.asList(
+        action, clustername,
+        Arguments.ARG_MANAGER, getRMAddr(),
+        Arguments.ARG_FILESYSTEM, getFsDefaultName(),
+        Arguments.ARG_DEBUG));
+
+    argsList.addAll(getExtraCLIArgs());
+
+    if (extraArgs != null) {
+      argsList.addAll(extraArgs);
+    }
+    ServiceLauncher<SliderClient> launcher = launchClientAgainstMiniMR(
+        //config includes RM binding info
+        new YarnConfiguration(config),
+        //varargs list of command line params
+        argsList
+    );
+    assertEquals(0, launcher.getServiceExitCode());
+    SliderClient client = launcher.getService();
+    if (blockUntilRunning) {
+      client.monitorAppToRunning(new Duration(CLUSTER_GO_LIVE_TIME));
+    }
+    return launcher;
+  }
+
+  /**
    * Delete with some pauses and backoff; designed to handle slow delete
    * operation in windows.
    */
@@ -652,28 +706,6 @@ public abstract class YarnMiniClusterTestBase extends SliderTestBase {
     return getTestConfiguration().getTrimmed(getApplicationHomeKey());
   }
 
-  public List<String> getImageCommands() {
-    if (switchToImageDeploy) {
-      // its an image that had better be defined
-      assertNotNull(getArchivePath());
-      if (!imageIsRemote) {
-        // its not remote, so assert it exists
-        File f = new File(getArchivePath());
-        assertTrue(f.exists());
-        return Arrays.asList(Arguments.ARG_IMAGE, f.toURI().toString());
-      } else {
-        assertNotNull(remoteImageURI);
-
-        // if it is remote, then its whatever the archivePath property refers to
-        return Arrays.asList(Arguments.ARG_IMAGE, remoteImageURI.toString());
-      }
-    } else {
-      assertNotNull(getApplicationHome());
-      assertTrue(new File(getApplicationHome()).exists());
-      return Arrays.asList(Arguments.ARG_APP_HOME, getApplicationHome());
-    }
-  }
-
   /**
    * Get the resource configuration dir in the source tree.
    *
@@ -746,14 +778,23 @@ public abstract class YarnMiniClusterTestBase extends SliderTestBase {
       SliderClient sliderClient,
       String clustername,
       String message,
-      boolean force) throws IOException, YarnException {
+      boolean force) {
     if (sliderClient != null) {
       if (SliderUtils.isUnset(clustername)) {
         clustername = sliderClient.getDeployedClusterName();
       }
       //only stop a cluster that exists
       if (SliderUtils.isSet(clustername)) {
-        return clusterActionFreeze(sliderClient, clustername, message, force);
+        try {
+          clusterActionFreeze(sliderClient, clustername, message, force);
+        } catch (Exception e) {
+          LOG.warn("While stopping cluster " + e, e);
+        }
+        try {
+          sliderClient.actionDestroy(clustername);
+        } catch (Exception e) {
+          LOG.warn("While destroying cluster " + e, e);
+        }
       }
     }
     return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
index 322b346..cf9e616 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.core.zk.BlockingZKWatcher;
 import org.apache.slider.core.zk.ZKIntegration;
 import org.slf4j.Logger;
@@ -109,9 +108,7 @@ public abstract class YarnZKMiniClusterTestBase extends
                                    int numLogDirs,
                                    boolean startZK,
                                    boolean startHDFS) throws IOException {
-    if (SliderUtils.isUnset(name)) {
-      name = methodName.getMethodName();
-    }
+    name = buildClustername(name);
     createMicroZKCluster("-" + name, conf);
     conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true);
     conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
deleted file mode 100644
index e2a21ea..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
-  "name": "app-1",
-  "lifetime": "3600",
-  "configuration": {
-    "properties": {
-      "g1": "a",
-      "g2": "b"
-    }
-  },
-  "resource": {
-    "cpus": 1,
-    "memory": "512"
-  },
-  "number_of_containers": 2,
-  "components": [
-    {
-      "name": "simple",
-      "configuration": {
-        "properties": {
-          "g1": "a",
-          "g2": "b"
-        }
-      }
-    },
-    {
-      "name": "master",
-      "configuration": {
-        "properties": {
-          "g1": "overridden",
-          "g2": "b"
-        }
-      }
-    },
-    {
-      "name": "worker",
-      "resource": {
-        "cpus": 1,
-        "memory": "1024"
-      },
-      "configuration": {
-        "properties": {
-          "g1": "overridden-by-worker",
-          "g2": "b",
-          "timeout": "1000"
-        }
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
index 552cdef..d7e2fd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
@@ -1,11 +1,29 @@
 {
   "name": "app-1",
   "lifetime": "3600",
+  "launch_command": "sleep 3600",
   "configuration": {
     "properties": {
       "g1": "a",
       "g2": "b"
-    }
+    },
+    "files": [
+      {
+        "type": "PROPERTIES",
+        "dest_file": "file1",
+        "props": {
+          "k1": "v1",
+          "k2": "v2"
+        }
+      },
+      {
+        "type": "XML",
+        "dest_file": "file2",
+        "props": {
+          "k3": "v3"
+        }
+      }
+    ]
   },
   "resource": {
     "cpus": 1,
@@ -14,7 +32,18 @@
   "number_of_containers": 2,
   "components": [
     {
-      "name": "simple"
+      "name": "simple",
+      "configuration": {
+        "files": [
+          {
+            "type": "PROPERTIES",
+            "dest_file": "file1",
+            "props": {
+              "k1": "overridden"
+            }
+          }
+        ]
+      }
     },
     {
       "name": "master",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
deleted file mode 100644
index cd1ab6f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
+++ /dev/null
@@ -1,81 +0,0 @@
-{
-  "name": "zk-app-1",
-  "lifetime": "3600",
-  "configuration": {
-    "properties": {
-      "internal.chaos.monkey.interval.seconds": "60",
-      "zookeeper.port": "2181",
-      "zookeeper.path": "/yarnapps_small_cluster",
-      "zookeeper.hosts": "zoo1,zoo2,zoo3",
-      "env.MALLOC_ARENA_MAX": "4",
-      "site.hbase.master.startup.retainassign": "true",
-      "site.fs.defaultFS": "hdfs://cluster:8020",
-      "site.fs.default.name": "hdfs://cluster:8020",
-      "site.hbase.master.info.port": "0",
-      "site.hbase.regionserver.info.port": "0"
-    }
-  },
-  "resource": {
-    "cpus": 1,
-    "memory": "512"
-  },
-  "number_of_containers": 2,
-  "components": [
-    {
-      "name": "simple",
-      "number_of_containers": 2,
-      "configuration": {
-        "properties": {
-          "g1": "a",
-          "g2": "b"
-        }
-      }
-    },
-    {
-      "name": "master",
-      "number_of_containers": 1,
-      "resource": {
-        "cpus": 1,
-        "memory": "512"
-      },
-      "configuration": {
-        "properties": {
-          "zookeeper.port": "2181",
-          "zookeeper.path": "/yarnapps_small_cluster",
-          "zookeeper.hosts": "zoo1,zoo2,zoo3",
-          "env.MALLOC_ARENA_MAX": "4",
-          "site.hbase.master.startup.retainassign": "true",
-          "site.fs.defaultFS": "hdfs://cluster:8020",
-          "site.fs.default.name": "hdfs://cluster:8020",
-          "site.hbase.master.info.port": "0",
-          "site.hbase.regionserver.info.port": "0",
-          "jvm.heapsize": "512M"
-        }
-      }
-    },
-    {
-      "name": "worker",
-      "number_of_containers": 5,
-      "resource": {
-        "cpus": 1,
-        "memory": "1024"
-      },
-      "configuration": {
-        "properties": {
-          "g1": "overridden-by-worker",
-          "g2": "b",
-          "zookeeper.port": "2181",
-          "zookeeper.path": "/yarnapps_small_cluster",
-          "zookeeper.hosts": "zoo1,zoo2,zoo3",
-          "env.MALLOC_ARENA_MAX": "4",
-          "site.hbase.master.startup.retainassign": "true",
-          "site.fs.defaultFS": "hdfs://cluster:8020",
-          "site.fs.default.name": "hdfs://cluster:8020",
-          "site.hbase.master.info.port": "0",
-          "site.hbase.regionserver.info.port": "0",
-          "jvm.heapsize": "512M"
-        }
-      }
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
index 90857db..b1d73c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
@@ -1,20 +1,12 @@
 {
   "name": "app-1",
   "lifetime": "3600",
+  "launch_command": "sleep 3600",
   "configuration": {
     "properties": {
       "g1": "a",
       "g2": "b",
-      "internal.chaos.monkey.interval.seconds": "60",
-      "zookeeper.port": "2181",
-      "zookeeper.path": "/yarnapps_small_cluster",
-      "zookeeper.hosts": "zoo1,zoo2,zoo3",
-      "env.MALLOC_ARENA_MAX": "4",
-      "site.hbase.master.startup.retainassign": "true",
-      "site.fs.defaultFS": "hdfs://cluster:8020",
-      "site.fs.default.name": "hdfs://cluster:8020",
-      "site.hbase.master.info.port": "0",
-      "site.hbase.regionserver.info.port": "0"
+      "internal.chaos.monkey.interval.seconds": "60"
     }
   },
   "resource": {
@@ -32,6 +24,7 @@
       "configuration": {
         "properties": {
           "g1": "overridden",
+          "g3": "will-be-overridden",
           "jvm.heapsize": "512M"
         }
       }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
index 7bfb410..abad34e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
@@ -17,16 +17,15 @@
 
 package org.apache.hadoop.yarn.services.api.impl;
 
-import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
-import static org.apache.hadoop.yarn.services.utils.RestApiErrorMessages.*;
+import static org.apache.slider.util.RestApiConstants.*;
+import static org.apache.slider.util.RestApiErrorMessages.*;
 
-import java.util.HashMap;
-import java.util.Map;
+import java.util.ArrayList;
 
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Resource;
-import org.apache.slider.common.SliderKeys;
+import org.apache.slider.util.ServiceApiUtil;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -61,12 +60,10 @@ public class TestApplicationApiService {
   @Test(timeout = 90000)
   public void testValidateApplicationPostPayload() throws Exception {
     Application app = new Application();
-    Map<String, String> compNameArtifactIdMap = new HashMap<>();
 
     // no name
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX + "application with no name");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
@@ -77,8 +74,7 @@ public class TestApplicationApiService {
     for (String badName : badNames) {
       app.setName(badName);
       try {
-        appApiService.validateApplicationPostPayload(app,
-            compNameArtifactIdMap);
+        ServiceApiUtil.validateApplicationPostPayload(app);
         Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
       } catch (IllegalArgumentException e) {
         Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID_FORMAT,
@@ -89,8 +85,7 @@ public class TestApplicationApiService {
     // no artifact
     app.setName("finance_home");
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_ARTIFACT_INVALID, e.getMessage());
@@ -100,8 +95,7 @@ public class TestApplicationApiService {
     Artifact artifact = new Artifact();
     app.setArtifact(artifact);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
@@ -112,8 +106,7 @@ public class TestApplicationApiService {
     artifact.setId("app.io/hbase:facebook_0.2");
     app.setNumberOfContainers(5l);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
     } catch (IllegalArgumentException e) {
       logger.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
@@ -124,22 +117,18 @@ public class TestApplicationApiService {
     Assert.assertEquals(app.getComponents().get(0).getName(),
         DEFAULT_COMPONENT_NAME);
     Assert.assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME);
-    Assert.assertEquals("Property not set",
-        app.getConfiguration().getProperties()
-            .get(SliderKeys.COMPONENT_TYPE_KEY),
-        SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
+    //TODO handle external app
 
     // unset artifact type, default component and no of containers to test other
     // validation logic
     artifact.setType(null);
-    app.setComponents(null);
+    app.setComponents(new ArrayList<>());
     app.setNumberOfContainers(null);
 
     // resource not specified
     artifact.setId("docker.io/centos:centos7");
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX + "application with no resource");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_RESOURCE_INVALID, e.getMessage());
@@ -149,28 +138,18 @@ public class TestApplicationApiService {
     Resource res = new Resource();
     app.setResource(res);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX + "application with no memory");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_RESOURCE_MEMORY_INVALID, e.getMessage());
     }
 
-    // cpus not specified
-    res.setMemory("2gb");
-    try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
-      Assert.fail(EXCEPTION_PREFIX + "application with no cpu");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_CPUS_INVALID, e.getMessage());
-    }
-
+    // cpu does not need to be always specified, it's an optional feature in yarn
     // invalid no of cpus
+    res.setMemory("100mb");
     res.setCpus(-2);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(
           EXCEPTION_PREFIX + "application with invalid no of cpups");
     } catch (IllegalArgumentException e) {
@@ -180,8 +159,7 @@ public class TestApplicationApiService {
     // number of containers not specified
     res.setCpus(2);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(
           EXCEPTION_PREFIX + "application with no container count");
     } catch (IllegalArgumentException e) {
@@ -191,8 +169,7 @@ public class TestApplicationApiService {
     // specifying profile along with cpus/memory raises exception
     res.setProfile("hbase_finance_large");
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX
           + "application with resource profile along with cpus/memory");
     } catch (IllegalArgumentException e) {
@@ -205,8 +182,7 @@ public class TestApplicationApiService {
     res.setCpus(null);
     res.setMemory(null);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
       Assert.fail(EXCEPTION_PREFIX
           + "application with resource profile only - NOT SUPPORTED");
     } catch (IllegalArgumentException e) {
@@ -222,8 +198,7 @@ public class TestApplicationApiService {
     // everything valid here
     app.setNumberOfContainers(5l);
     try {
-      appApiService.validateApplicationPostPayload(app,
-          compNameArtifactIdMap);
+      ServiceApiUtil.validateApplicationPostPayload(app);
     } catch (IllegalArgumentException e) {
       logger.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
index d21785f..f6a2cc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
@@ -60,14 +60,6 @@ public interface SliderApplicationApi {
   ConfTreeOperations getDesiredResources() throws IOException;
 
   /**
-   * Put an updated resources structure. This triggers a cluster flex
-   * operation
-   * @param updated updated resources
-   * @throws IOException on any problem.
-   */
-  void putDesiredResources(ConfTree updated) throws IOException;
-
-  /**
    * Get the aggregate resolved model
    * @return the aggregate configuration of what was asked for
    * -after resolution has taken place

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
index 893e706..f384927 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
@@ -52,12 +52,9 @@ public interface SliderClusterProtocol extends VersionedProtocol {
       Messages.UpgradeContainersRequestProto request) throws IOException,
       YarnException;
 
-  /**
-   * Flex the cluster. 
-   */
-  Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
-      throws IOException;
 
+  Messages.FlexComponentResponseProto flexComponent(
+      Messages.FlexComponentRequestProto request) throws IOException;
 
   /**
    * Get the current cluster status
@@ -121,13 +118,6 @@ public interface SliderClusterProtocol extends VersionedProtocol {
       throws IOException;
 
   /**
-   * Get the instance definition
-   */
-  Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
-    Messages.GetInstanceDefinitionRequestProto request)
-    throws IOException, YarnException;
-
-  /**
    * Get the application liveness
    * @return current liveness information
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
index cc3355a..502b519 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
@@ -28,6 +28,7 @@ import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -55,11 +56,11 @@ public class Application extends BaseResource {
   private Long numberOfRunningContainers = null;
   private Long lifetime = null;
   private PlacementPolicy placementPolicy = null;
-  private List<Component> components = null;
-  private Configuration configuration = null;
+  private List<Component> components = new ArrayList<>();
+  private Configuration configuration = new Configuration();
   private List<Container> containers = new ArrayList<>();
   private ApplicationState state = null;
-  private Map<String, String> quicklinks = null;
+  private Map<String, String> quicklinks = new HashMap<>();
   private String queue = null;
 
   /**
@@ -285,6 +286,15 @@ public class Application extends BaseResource {
     this.components = components;
   }
 
+  public Component getComponent(String name) {
+    for (Component component : components) {
+      if (component.getName().equals(name)) {
+        return component;
+      }
+    }
+    return null;
+  }
+
   /**
    * Config properties of an application. Configurations provided at the
    * application/global level are available to all the components. Specific

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
index 4f50564..e7f3796 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
@@ -22,6 +22,7 @@ import io.swagger.annotations.ApiModelProperty;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 
@@ -49,15 +50,17 @@ public class Component implements Serializable {
   private String name = null;
   private List<String> dependencies = new ArrayList<String>();
   private ReadinessCheck readinessCheck = null;
-  private Artifact artifact = null;
+  private Artifact artifact = new Artifact();
   private String launchCommand = null;
-  private Resource resource = null;
+  private Resource resource = new Resource();
   private Long numberOfContainers = null;
-  private Boolean uniqueComponentSupport = null;
-  private Boolean runPrivilegedContainer = null;
+  private Boolean uniqueComponentSupport = false;
+  private Boolean runPrivilegedContainer = false;
   private PlacementPolicy placementPolicy = null;
-  private Configuration configuration = null;
+  private Configuration configuration = new Configuration();
   private List<String> quicklinks = new ArrayList<String>();
+  private List<Container> containers =
+      Collections.synchronizedList(new ArrayList<Container>());
 
   /**
    * Name of the application component (mandatory).
@@ -196,6 +199,29 @@ public class Component implements Serializable {
     this.numberOfContainers = numberOfContainers;
   }
 
+  @ApiModelProperty(example = "null", value = "Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started application.")
+  @JsonProperty("containers")
+  public List<Container> getContainers() {
+    return containers;
+  }
+
+  public void setContainers(List<Container> containers) {
+    this.containers = containers;
+  }
+
+  public void addContainer(Container container) {
+    this.containers.add(container);
+  }
+
+  public Container getContainer(String id) {
+    for (Container container : containers) {
+      if (container.getId().equals(id)) {
+        return container;
+      }
+    }
+    return null;
+  }
+
   /**
    * Certain applications need to define multiple components using the same
    * artifact and resource profile, differing only in configurations. In such
@@ -354,6 +380,8 @@ public class Component implements Serializable {
     sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
     sb.append("    numberOfContainers: ")
         .append(toIndentedString(numberOfContainers)).append("\n");
+    sb.append("    containers: ").append(toIndentedString(containers))
+        .append("\n");
     sb.append("    uniqueComponentSupport: ")
         .append(toIndentedString(uniqueComponentSupport)).append("\n");
     sb.append("    runPrivilegedContainer: ")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
index bad68c1..cdc96b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
@@ -21,6 +21,7 @@ import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
 import java.io.Serializable;
+import java.util.Map;
 import java.util.Objects;
 
 import javax.xml.bind.annotation.XmlElement;
@@ -62,7 +63,7 @@ public class ConfigFile implements Serializable {
   private TypeEnum type = null;
   private String destFile = null;
   private String srcFile = null;
-  private Object props = null;
+  private Map<String, String> props = null;
 
   /**
    * Config file in the standard format like xml, properties, json, yaml,
@@ -104,6 +105,8 @@ public class ConfigFile implements Serializable {
   }
 
   /**
+   * TODO this probably is not required for non-template configs. It is now used as symlink for localization for non-template configs - we could infer the name from destFile instead
+   *
    * Required for type template. This provides the source location of the
    * template which needs to be mounted as dest_file post property
    * substitutions. Typically the src_file would point to a source controlled
@@ -131,21 +134,36 @@ public class ConfigFile implements Serializable {
    * src_file is mandatory and the src_file content is dumped to dest_file post
    * property substitutions.
    **/
-  public ConfigFile props(Object props) {
+  public ConfigFile props(Map<String, String> props) {
     this.props = props;
     return this;
   }
 
   @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.")
   @JsonProperty("props")
-  public Object getProps() {
+  public Map<String, String> getProps() {
     return props;
   }
 
-  public void setProps(Object props) {
+  public void setProps(Map<String, String> props) {
     this.props = props;
   }
 
+  public long getLong(String name, long defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    String value = props.get(name.trim());
+    return Long.parseLong(value);
+  }
+
+  public boolean getBoolean(String name, boolean defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    return Boolean.valueOf(props.get(name.trim()));
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
index c4f2ad4..c43bd64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -29,6 +29,7 @@ import java.util.Objects;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.commons.lang.StringUtils;
 
 /**
  * Set of configuration properties that can be injected into the application
@@ -104,6 +105,35 @@ public class Configuration implements Serializable {
     this.files = files;
   }
 
+  public long getPropertyLong(String name, long defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    String value = properties.get(name.trim());
+    if (StringUtils.isEmpty(value)) {
+      return defaultValue;
+    }
+    return Long.parseLong(value);
+  }
+
+  public String getProperty(String name, String defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    return properties.get(name.trim());
+  }
+
+  public void setProperty(String name, String value) {
+    properties.put(name, value);
+  }
+
+  public String getProperty(String name) {
+    if (name == null) {
+      return null;
+    }
+    return properties.get(name.trim());
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
index 190121d..c255369 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
@@ -39,7 +39,7 @@ public class Resource extends BaseResource implements Cloneable {
   private static final long serialVersionUID = -6431667797380250037L;
 
   private String profile = null;
-  private Integer cpus = null;
+  private Integer cpus = 1;
   private String memory = null;
 
   /**


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[45/50] [abbrv] hadoop git commit: YARN-6613. Update json validation for new native services providers. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/default.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/default.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/default.json
new file mode 100644
index 0000000..16f0efc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/default.json
@@ -0,0 +1,16 @@
+{
+  "name": "default-app-1",
+  "lifetime": "3600",
+  "components" :
+  [
+    {
+      "name": "SLEEP",
+      "number_of_containers": 1,
+      "launch_command": "sleep 3600",
+      "resource": {
+        "cpus": 2,
+        "memory": "256"
+      }
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external0.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external0.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external0.json
new file mode 100644
index 0000000..1f9dfeb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external0.json
@@ -0,0 +1,8 @@
+{
+  "name": "external-0",
+  "lifetime": "3600",
+  "artifact": {
+    "type": "APPLICATION",
+    "id": "app-1"
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external1.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external1.json
new file mode 100644
index 0000000..03ebce5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external1.json
@@ -0,0 +1,30 @@
+{
+  "name": "external-1",
+  "lifetime": "3600",
+  "components": [
+    {
+      "name": "simple",
+      "artifact": {
+        "type": "APPLICATION",
+        "id": "app-1"
+      }
+    },
+    {
+      "name": "master",
+      "configuration": {
+        "properties": {
+          "g3": "is-overridden"
+        }
+      }
+    },
+    {
+      "name": "other",
+      "launch_command": "sleep 3600",
+      "number_of_containers": 2,
+      "resource": {
+        "cpus": 1,
+        "memory": "512"
+      }
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external2.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external2.json
new file mode 100644
index 0000000..9e61fba
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/external2.json
@@ -0,0 +1,22 @@
+{
+  "name": "external-2",
+  "lifetime": "3600",
+  "components": [
+    {
+      "name": "ext",
+      "artifact": {
+        "type": "APPLICATION",
+        "id": "external-1"
+      }
+    },
+    {
+      "name": "another",
+      "launch_command": "sleep 3600",
+      "number_of_containers": 1,
+      "resource": {
+        "cpus": 1,
+        "memory": "512"
+      }
+    }
+  ]
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java
new file mode 100644
index 0000000..7e8cf5b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.util;
+
+public interface RestApiConstants {
+  String CONTEXT_ROOT = "/services/v1";
+  String APPLICATIONS_API_RESOURCE_PATH = "/applications";
+  String CONTAINERS_API_RESOURCE_PATH = "/containers";
+  String SLIDER_APPMASTER_COMPONENT_NAME = "slider-appmaster";
+  String SLIDER_CONFIG_SCHEMA = "http://example.org/specification/v2.0.0";
+  String METAINFO_SCHEMA_VERSION = "2.1";
+  String COMPONENT_TYPE_YARN_DOCKER = "yarn_docker";
+
+  String DEFAULT_START_CMD = "/bootstrap/privileged-centos6-sshd";
+  String DEFAULT_COMPONENT_NAME = "DEFAULT";
+  String DEFAULT_IMAGE = "centos:centos6";
+  String DEFAULT_NETWORK = "bridge";
+  String DEFAULT_COMMAND_PATH = "/usr/bin/docker";
+  String DEFAULT_USE_NETWORK_SCRIPT = "yes";
+
+  String PLACEHOLDER_APP_NAME = "${APP_NAME}";
+  String PLACEHOLDER_APP_COMPONENT_NAME = "${APP_COMPONENT_NAME}";
+  String PLACEHOLDER_COMPONENT_ID = "${COMPONENT_ID}";
+
+  String PROPERTY_REST_SERVICE_HOST = "REST_SERVICE_HOST";
+  String PROPERTY_REST_SERVICE_PORT = "REST_SERVICE_PORT";
+  String PROPERTY_APP_LIFETIME = "docker.lifetime";
+  String PROPERTY_APP_RUNAS_USER = "APP_RUNAS_USER";
+  Long DEFAULT_UNLIMITED_LIFETIME = -1l;
+
+  Integer HTTP_STATUS_CODE_ACCEPTED = 202;
+  String ARTIFACT_TYPE_SLIDER_ZIP = "slider-zip";
+
+  Integer GET_APPLICATIONS_THREAD_POOL_SIZE = 200;
+
+  String PROPERTY_PYTHON_PATH = "python.path";
+  String PROPERTY_DNS_DEPENDENCY = "site.global.dns.dependency";
+
+  String COMMAND_ORDER_SUFFIX_START = "-START";
+  String COMMAND_ORDER_SUFFIX_STARTED = "-STARTED";
+  String EXPORT_GROUP_NAME = "QuickLinks";
+
+  Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001;
+  Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002;
+  Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003;
+  Integer ERROR_CODE_APP_NAME_INVALID = 404004;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
new file mode 100644
index 0000000..0f6247d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.util;
+
+public interface RestApiErrorMessages {
+  String ERROR_APPLICATION_NAME_INVALID =
+      "Application name is either empty or not provided";
+  String ERROR_APPLICATION_NAME_INVALID_FORMAT =
+      "Application name is not valid - only lower case letters, digits,"
+          + " underscore and hyphen are allowed";
+
+  String ERROR_APPLICATION_NOT_RUNNING = "Application not running";
+  String ERROR_APPLICATION_DOES_NOT_EXIST = "Application not found";
+  String ERROR_APPLICATION_IN_USE = "Application already exists in started"
+      + " state";
+  String ERROR_APPLICATION_INSTANCE_EXISTS = "Application already exists in"
+      + " stopped/failed state (either restart with PUT or destroy with DELETE"
+      + " before creating a new one)";
+
+  String ERROR_SUFFIX_FOR_COMPONENT =
+      " for component %s (nor at the global level)";
+  String ERROR_ARTIFACT_INVALID = "Artifact is not provided";
+  String ERROR_ARTIFACT_FOR_COMP_INVALID =
+      ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_ARTIFACT_ID_INVALID =
+      "Artifact id (like docker image name) is either empty or not provided";
+  String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
+      ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+
+  String ERROR_RESOURCE_INVALID = "Resource is not provided";
+  String ERROR_RESOURCE_FOR_COMP_INVALID =
+      ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_RESOURCE_MEMORY_INVALID =
+      "Application resource or memory not provided";
+  String ERROR_RESOURCE_CPUS_INVALID =
+      "Application resource or cpus not provided";
+  String ERROR_RESOURCE_CPUS_INVALID_RANGE =
+      "Unacceptable no of cpus specified, either zero or negative";
+  String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID =
+      ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID =
+      ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE =
+      ERROR_RESOURCE_CPUS_INVALID_RANGE
+          + " for component %s (or at the global level)";
+  String ERROR_CONTAINERS_COUNT_INVALID =
+      "Required no of containers not specified";
+  String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
+      ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+
+  String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED =
+      "Cannot specify" + " cpus/memory along with profile";
+  String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED =
+      ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
+          + " for component %s";
+  String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET =
+      "Resource profile is not " + "supported yet. Please specify cpus/memory.";
+
+  String ERROR_NULL_ARTIFACT_ID =
+      "Artifact Id can not be null if artifact type is none";
+  String ERROR_ABSENT_NUM_OF_INSTANCE =
+      "Num of instances should appear either globally or per component";
+  String ERROR_ABSENT_LAUNCH_COMMAND =
+      "launch command should appear if type is slider-zip or none";
+
+  String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
+      + " component level, needs corresponding values set at application level";
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
new file mode 100644
index 0000000..776ce00
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Configuration;
+import org.apache.slider.api.resource.Resource;
+import org.apache.slider.common.tools.SliderUtils;
+
+public class ServiceApiUtil {
+
+  @VisibleForTesting
+  public static void validateApplicationPostPayload(Application application) {
+    if (StringUtils.isEmpty(application.getName())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
+    }
+    if (!SliderUtils.isClusternameValid(application.getName())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT);
+    }
+
+    // If the application has no components do top-level checks
+    if (!hasComponent(application)) {
+      // artifact
+      if (application.getArtifact() == null) {
+        throw new IllegalArgumentException(
+            RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+      }
+      if (StringUtils.isEmpty(application.getArtifact().getId())) {
+        throw new IllegalArgumentException(
+            RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+      }
+
+      // If artifact is of type APPLICATION, add a slider specific property
+      if (application.getArtifact().getType()
+          == Artifact.TypeEnum.APPLICATION) {
+        if (application.getConfiguration() == null) {
+          application.setConfiguration(new Configuration());
+        }
+      }
+      // resource
+      validateApplicationResource(application.getResource(), null,
+          application.getArtifact().getType());
+
+      // container size
+      if (application.getNumberOfContainers() == null) {
+        throw new IllegalArgumentException(
+            RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID);
+      }
+
+      // Since it is a simple app with no components, create a default component
+      application.getComponents().add(createDefaultComponent(application));
+    } else {
+      // If the application has components, then run checks for each component.
+      // Let global values take effect if component level values are not
+      // provided.
+      Artifact globalArtifact = application.getArtifact();
+      Resource globalResource = application.getResource();
+      Long globalNumberOfContainers = application.getNumberOfContainers();
+      for (Component comp : application.getComponents()) {
+        // artifact
+        if (comp.getArtifact() == null) {
+          comp.setArtifact(globalArtifact);
+        }
+        // If still null raise validation exception
+        if (comp.getArtifact() == null) {
+          throw new IllegalArgumentException(String
+              .format(RestApiErrorMessages.ERROR_ARTIFACT_FOR_COMP_INVALID,
+                  comp.getName()));
+        }
+        if (StringUtils.isEmpty(comp.getArtifact().getId())) {
+          throw new IllegalArgumentException(String
+              .format(RestApiErrorMessages.ERROR_ARTIFACT_ID_FOR_COMP_INVALID,
+                  comp.getName()));
+        }
+
+        // If artifact is of type APPLICATION, add a slider specific property
+        if (comp.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
+          if (comp.getConfiguration() == null) {
+            comp.setConfiguration(new Configuration());
+          }
+          comp.setName(comp.getArtifact().getId());
+        }
+
+        // resource
+        if (comp.getResource() == null) {
+          comp.setResource(globalResource);
+        }
+        validateApplicationResource(comp.getResource(), comp,
+            comp.getArtifact().getType());
+
+        // container count
+        if (comp.getNumberOfContainers() == null) {
+          comp.setNumberOfContainers(globalNumberOfContainers);
+        }
+        if (comp.getNumberOfContainers() == null) {
+          throw new IllegalArgumentException(String.format(
+              RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID,
+              comp.getName()));
+        }
+      }
+    }
+
+    // Application lifetime if not specified, is set to unlimited lifetime
+    if (application.getLifetime() == null) {
+      application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
+    }
+  }
+
+  private static void validateApplicationResource(Resource resource,
+      Component comp, Artifact.TypeEnum artifactType) {
+    // Only apps/components of type APPLICATION can skip resource requirement
+    if (resource == null && artifactType == Artifact.TypeEnum.APPLICATION) {
+      return;
+    }
+    if (resource == null) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_INVALID : String
+              .format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    // One and only one of profile OR cpus & memory can be specified. Specifying
+    // both raises validation error.
+    if (StringUtils.isNotEmpty(resource.getProfile()) && (
+        resource.getCpus() != null || StringUtils
+            .isNotEmpty(resource.getMemory()))) {
+      throw new IllegalArgumentException(comp == null ?
+          RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED :
+          String.format(
+              RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
+              comp.getName()));
+    }
+    // Currently resource profile is not supported yet, so we will raise
+    // validation error if only resource profile is specified
+    if (StringUtils.isNotEmpty(resource.getProfile())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET);
+    }
+
+    String memory = resource.getMemory();
+    Integer cpus = resource.getCpus();
+    if (StringUtils.isEmpty(memory)) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID :
+              String.format(
+                  RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    if (cpus == null) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID :
+              String.format(
+                  RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    if (cpus <= 0) {
+      throw new IllegalArgumentException(comp == null ?
+          RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String
+          .format(
+              RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE,
+              comp.getName()));
+    }
+  }
+
+  public static boolean hasComponent(Application application) {
+    if (application.getComponents() == null || application.getComponents()
+        .isEmpty()) {
+      return false;
+    }
+    return true;
+  }
+
+  public static Component createDefaultComponent(Application app) {
+    Component comp = new Component();
+    comp.setName(RestApiConstants.DEFAULT_COMPONENT_NAME);
+    comp.setArtifact(app.getArtifact());
+    comp.setResource(app.getResource());
+    comp.setNumberOfContainers(app.getNumberOfContainers());
+    comp.setLaunchCommand(app.getLaunchCommand());
+    return comp;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
index b8bdc59..bfcab23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
@@ -80,22 +80,14 @@ message UpgradeContainersRequestProto {
 message UpgradeContainersResponseProto {
 }
 
-/**
- * flex the cluster
- */
-message FlexClusterRequestProto {
-  required string clusterSpec = 1;
+message FlexComponentRequestProto {
+  optional string name = 1;
+  optional int32 numberOfContainers = 2;
 }
 
-
-/**
- * flex the cluster
- */
-message FlexClusterResponseProto {
-  required bool response = 1;
+message FlexComponentResponseProto {
 }
 
-
 /**
  * void request
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
index d68da2b..8a0faf9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
@@ -61,24 +61,14 @@ service SliderClusterProtocolPB {
   rpc upgradeContainers(UpgradeContainersRequestProto) 
     returns(UpgradeContainersResponseProto);
 
-  /**
-   * Flex the cluster. 
-   */
-  rpc flexCluster(FlexClusterRequestProto) 
-    returns(FlexClusterResponseProto);
+  rpc flexComponent(FlexComponentRequestProto) returns (FlexComponentResponseProto);
 
   /**
    * Get the current cluster status
    */
   rpc getJSONClusterStatus(GetJSONClusterStatusRequestProto)
     returns(GetJSONClusterStatusResponseProto);
-      
-  /**
-   * Get the instance definition
-   */
-  rpc getInstanceDefinition(GetInstanceDefinitionRequestProto)
-   returns(GetInstanceDefinitionResponseProto);
-      
+
   /**
    * List all running nodes in a role
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
index a2517d5..96bfe0f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
@@ -24,10 +24,6 @@
     <value>true</value>
   </property>
   <property>
-    <name>slider.provider.agent</name>
-    <value>org.apache.slider.providers.agent.AgentProviderFactory</value>
-  </property>
-  <property>
     <name>slider.provider.docker</name>
     <value>org.apache.slider.providers.docker.DockerProviderFactory</value>
   </property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
deleted file mode 100644
index b955931..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.launch;
-
-import java.lang.reflect.Method;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.slider.api.ResourceKeys;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.common.SliderKeys;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAppMasterLauncher {
-  SliderYarnClientImpl mockYarnClient;
-  YarnClientApplication yarnClientApp;
-  ApplicationSubmissionContext appSubmissionContext;
-  Set<String> tags = Collections.emptySet();
-  AppMasterLauncher appMasterLauncher = null;
-  boolean isOldApi = true;
-  Method rolledLogsIncludeMethod = null;
-  Method rolledLogsExcludeMethod = null;
-
-  @Before
-  public void initialize() throws Exception {
-    mockYarnClient = EasyMock.createNiceMock(SliderYarnClientImpl.class);
-    yarnClientApp = EasyMock.createNiceMock(YarnClientApplication.class);
-    appSubmissionContext = EasyMock
-        .createNiceMock(ApplicationSubmissionContext.class);
-    EasyMock.expect(yarnClientApp.getApplicationSubmissionContext())
-        .andReturn(appSubmissionContext).once();
-    EasyMock.expect(mockYarnClient.createApplication())
-        .andReturn(yarnClientApp).once();
-
-    try {
-      LogAggregationContext.class.getMethod("newInstance", String.class,
-          String.class, String.class, String.class);
-      isOldApi = false;
-      rolledLogsIncludeMethod = LogAggregationContext.class
-          .getMethod("getRolledLogsIncludePattern");
-      rolledLogsExcludeMethod = LogAggregationContext.class
-          .getMethod("getRolledLogsExcludePattern");
-    } catch (Exception e) {
-      isOldApi = true;
-    }
-  }
-
-  /**
-   * These tests will probably fail when compiled against hadoop 2.7+. Please
-   * refer to SLIDER-810. It has been purposely not modified so that it fails
-   * and that someone needs to modify the code in
-   * {@code AbstractLauncher#extractLogAggregationContext(Map)}. Comments are
-   * provided in that method as to what needs to be done.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExtractLogAggregationContext() throws Exception {
-    Map<String, String> options = new HashMap<String, String>();
-    options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS,
-        " | slider*.txt  |agent.out| |");
-    options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS,
-        "command*.json|  agent.log*        |     ");
-
-    EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
-    appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
-        null, mockYarnClient, false, null, options, tags, null);
-
-    // Verify the include/exclude patterns
-    String expectedInclude = "slider*.txt|agent.out";
-    String expectedExclude = "command*.json|agent.log*";
-    assertPatterns(expectedInclude, expectedExclude);
-
-    EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
-
-  }
-
-  @Test
-  public void testExtractLogAggregationContextEmptyIncludePattern()
-      throws Exception {
-    Map<String, String> options = new HashMap<String, String>();
-    options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS, " ");
-    options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS,
-        "command*.json|  agent.log*        |     ");
-
-    EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
-    appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
-        null, mockYarnClient, false, null, options, tags, null);
-
-    // Verify the include/exclude patterns
-    String expectedInclude = isOldApi ? "" : ".*";
-    String expectedExclude = "command*.json|agent.log*";
-    assertPatterns(expectedInclude, expectedExclude);
-
-    EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
-  }
-
-  @Test
-  public void testExtractLogAggregationContextEmptyIncludeAndExcludePattern()
-      throws Exception {
-    Map<String, String> options = new HashMap<String, String>();
-    options.put(ResourceKeys.YARN_LOG_INCLUDE_PATTERNS, "");
-    options.put(ResourceKeys.YARN_LOG_EXCLUDE_PATTERNS, "  ");
-
-    EasyMock.replay(mockYarnClient, appSubmissionContext, yarnClientApp);
-    appMasterLauncher = new AppMasterLauncher("cl1", SliderKeys.APP_TYPE, null,
-        null, mockYarnClient, false, null, options, tags, null);
-
-    // Verify the include/exclude patterns
-    String expectedInclude = isOldApi ? "" : ".*";
-    String expectedExclude = "";
-    assertPatterns(expectedInclude, expectedExclude);
-
-    EasyMock.verify(mockYarnClient, appSubmissionContext, yarnClientApp);
-  }
-
-  private void assertPatterns(String expectedIncludePattern,
-      String expectedExcludePattern) throws Exception {
-    if (isOldApi) {
-      Assert.assertEquals(expectedIncludePattern,
-          appMasterLauncher.logAggregationContext.getIncludePattern());
-      Assert.assertEquals(expectedExcludePattern,
-          appMasterLauncher.logAggregationContext.getExcludePattern());
-    } else {
-      Assert.assertEquals(expectedIncludePattern,
-          (String) rolledLogsIncludeMethod
-              .invoke(appMasterLauncher.logAggregationContext));
-      Assert.assertEquals(expectedExcludePattern,
-          (String) rolledLogsExcludeMethod
-              .invoke(appMasterLauncher.logAggregationContext));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
deleted file mode 100644
index a8f6b26..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.launch;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.hadoop.yarn.util.Records;
-import org.apache.slider.api.ResourceKeys;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.common.SliderKeys;
-import org.easymock.EasyMock;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestAppMasterLauncherWithAmReset {
-  SliderYarnClientImpl mockYarnClient;
-  YarnClientApplication yarnClientApp;
-  ApplicationSubmissionContext appSubmissionContext;
-  GetNewApplicationResponse newApp;
-  Set<String> tags = Collections.emptySet();
-  AppMasterLauncher appMasterLauncher = null;
-  boolean isOldApi = true;
-
-  @Before
-  public void initialize() throws Exception {
-    mockYarnClient = EasyMock.createNiceMock(SliderYarnClientImpl.class);
-    yarnClientApp = EasyMock.createNiceMock(YarnClientApplication.class);
-    newApp = EasyMock.createNiceMock(GetNewApplicationResponse.class);
-    EasyMock.expect(mockYarnClient.createApplication())
-        .andReturn(new YarnClientApplication(newApp,
-        Records.newRecord(ApplicationSubmissionContext.class)));
-  }
-
-  @Test
-  public void testExtractYarnResourceManagerAmRetryCountWindowMs() throws
-      Exception {
-    Map<String, String> options = new HashMap<String, String>();
-    final String expectedInterval = Integer.toString (120000);
-    options.put(ResourceKeys.YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS,
-        expectedInterval);
-    EasyMock.replay(mockYarnClient, yarnClientApp);
-
-    appMasterLauncher = new AppMasterLauncher("am1", SliderKeys.APP_TYPE, null,
-        null, mockYarnClient, false, null, options, tags, null);
-
-    ApplicationSubmissionContext ctx = appMasterLauncher.application
-        .getApplicationSubmissionContext();
-    String retryIntervalWindow = Long.toString(ctx
-        .getAttemptFailuresValidityInterval());
-    Assert.assertEquals(expectedInterval, retryIntervalWindow);
-  }
-
-  @Test
-  public void testExtractYarnResourceManagerAmRetryCountWindowMsDefaultValue()
-      throws Exception {
-    Map<String, String> options = new HashMap<String, String>();
-    EasyMock.replay(mockYarnClient, yarnClientApp);
-
-    appMasterLauncher = new AppMasterLauncher("am1", SliderKeys.APP_TYPE, null,
-        null, mockYarnClient, false, null, options, tags, null);
-
-    ApplicationSubmissionContext ctx = appMasterLauncher.application
-        .getApplicationSubmissionContext();
-    long retryIntervalWindow = ctx.getAttemptFailuresValidityInterval();
-    Assert.assertEquals(ResourceKeys.DEFAULT_AM_RETRY_COUNT_WINDOW_MS,
-        retryIntervalWindow);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/TestServiceRecordAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/TestServiceRecordAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/TestServiceRecordAttributes.java
deleted file mode 100644
index a1986cd..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/TestServiceRecordAttributes.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.server.appmaster;
-
-import org.apache.hadoop.registry.client.types.ServiceRecord;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.core.conf.MapOperations;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- *
- */
-public class TestServiceRecordAttributes extends Assert {
-
-  @Test
-  public void testAppConfigProvidedServiceRecordAttributes() throws Exception {
-    Map<String, String> options = new HashMap<>();
-    options.put("slider.some.arbitrary.option", "arbitrary value");
-    options.put("service.record.attribute.one_attribute", "one_attribute_value");
-    options.put("service.record.attribute.second_attribute", "second_attribute_value");
-    MapOperations serviceProps = new MapOperations(SliderKeys.COMPONENT_AM, options);
-    options = new HashMap<>();
-    options.put("some.component.attribute", "component_attribute_value");
-    options.put("service.record.attribute.component_attribute", "component_attribute_value");
-    MapOperations compProps = new MapOperations("TEST_COMP", options);
-
-    SliderAppMaster appMaster = new SliderAppMaster();
-
-    ServiceRecord appServiceRecord = new ServiceRecord();
-
-    appMaster.setProvidedServiceRecordAttributes(serviceProps, appServiceRecord);
-
-    assertNull("property should not be attribute",
-               appServiceRecord.get("slider.some.arbitrary.option"));
-    assertEquals("wrong value", "one_attribute_value",
-                 appServiceRecord.get("one_attribute"));
-    assertEquals("wrong value", "second_attribute_value",
-                 appServiceRecord.get("second_attribute"));
-
-    ServiceRecord compServiceRecord = new ServiceRecord();
-
-    appMaster.setProvidedServiceRecordAttributes(compProps, compServiceRecord);
-
-    assertNull("should not be attribute",
-               compServiceRecord.get("some.component.attribute"));
-    assertEquals("wrong value", "component_attribute_value",
-                 compServiceRecord.get("component_attribute"));
-
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index f67ea58..4922c2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
@@ -62,6 +64,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
 import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
@@ -77,13 +80,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.WebAppException;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.RoleKeys;
-import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.proto.SliderClusterAPI;
-import org.apache.slider.client.SliderYarnClientImpl;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.params.AbstractActionArgs;
@@ -95,10 +97,7 @@ import org.apache.slider.common.tools.PortScanner;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.buildutils.InstanceIO;
 import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
 import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
@@ -109,13 +108,12 @@ import org.apache.slider.core.main.ExitCodeProvider;
 import org.apache.slider.core.main.LauncherExitCodes;
 import org.apache.slider.core.main.RunService;
 import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.core.registry.info.CustomRegistryConstants;
 import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.SliderProviderFactory;
-import org.apache.slider.providers.slideram.SliderAMClientProvider;
-import org.apache.slider.providers.slideram.SliderAMProviderService;
 import org.apache.slider.server.appmaster.actions.ActionHalt;
 import org.apache.slider.server.appmaster.actions.ActionRegisterServiceInstance;
 import org.apache.slider.server.appmaster.actions.ActionStopSlider;
@@ -136,7 +134,6 @@ import org.apache.slider.server.appmaster.monkey.ChaosKillContainer;
 import org.apache.slider.server.appmaster.monkey.ChaosMonkeyService;
 import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
 import org.apache.slider.server.appmaster.operations.AsyncRMOperationHandler;
-import org.apache.slider.server.appmaster.operations.ProviderNotifyingOperationHandler;
 import org.apache.slider.server.appmaster.operations.RMOperationHandler;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
 import org.apache.slider.server.appmaster.rpc.SliderAMPolicyProvider;
@@ -146,6 +143,7 @@ import org.apache.slider.server.appmaster.security.SecurityConfiguration;
 import org.apache.slider.server.appmaster.state.AppState;
 import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
 import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
 import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.web.SliderAMWebApp;
@@ -161,18 +159,20 @@ import org.apache.slider.server.services.workflow.ServiceThreadFactory;
 import org.apache.slider.server.services.workflow.WorkflowExecutorService;
 import org.apache.slider.server.services.workflow.WorkflowRpcService;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
 import java.net.URL;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -242,8 +242,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private RMOperationHandler rmOperationHandler;
-  
-  private RMOperationHandler providerRMOperationHandler;
 
   /** Handle to communicate with the Node Manager*/
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
@@ -252,7 +250,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   /**
    * Credentials for propagating down to launched containers
    */
-  private Credentials containerCredentials;
+  private Credentials containerCredentials = new Credentials();
 
   /**
    * Slider IPC: Real service handler
@@ -320,13 +318,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   private final AtomicBoolean initCompleted = new AtomicBoolean(false);
 
-  /**
-   * Flag to set if the process exit code was set before shutdown started
-   */
-  @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-  private boolean spawnedProcessExitedBeforeShutdownTriggered;
-
-
   /** Arguments passed in : raw*/
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private SliderAMArgs serviceArgs;
@@ -371,7 +362,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   private SliderAMWebApp webApp;
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private InetSocketAddress rpcServiceAddress;
-  private SliderAMProviderService sliderAMProvider;
 
   /**
    * Executor.
@@ -398,12 +388,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   private boolean securityEnabled;
   private ContentCache contentCache;
+  private static final JsonSerDeser<Application> jsonSerDeser =
+      new JsonSerDeser<Application>(Application.class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
 
   /**
    * resource limits
    */
   private Resource maximumResourceCapability;
-
+  private Application application;
   /**
    * Service Constructor
    */
@@ -586,84 +579,31 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   /**
    * Create and run the cluster.
-   * @param clustername cluster name
+   * @param appName cluster name
    * @return exit code
    * @throws Throwable on a failure
    */
-  private int createAndRunCluster(String clustername) throws Throwable {
-
-    //load the cluster description from the cd argument
-    String sliderClusterDir = serviceArgs.getSliderClusterURI();
-    URI sliderClusterURI = new URI(sliderClusterDir);
-    Path clusterDirPath = new Path(sliderClusterURI);
-    log.info("Application defined at {}", sliderClusterURI);
+  private int createAndRunCluster(String appName) throws Throwable {
+    Path appDir = new Path((serviceArgs.getAppDefDir()));
     SliderFileSystem fs = getClusterFS();
-
-    // build up information about the running application -this
-    // will be passed down to the cluster status
-    MapOperations appInformation = new MapOperations(); 
-
-    AggregateConf instanceDefinition =
-      InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);
-    instanceDefinition.setName(clustername);
-
-    log.info("Deploying cluster {}:", instanceDefinition);
-
-    // and resolve it
-    AggregateConf resolvedInstance = new AggregateConf( instanceDefinition);
-    resolvedInstance.resolve();
-
-    stateForProviders.setApplicationName(clustername);
-
+    fs.setAppDir(appDir);
+    Path appJson = new Path(appDir, appName + ".json");
+    log.info("Loading application definition from " + appJson);
+    application = jsonSerDeser.load(fs.getFileSystem(), appJson);
+    log.info("Application Json: " + application);
+    stateForProviders.setApplicationName(appName);
     Configuration serviceConf = getConfig();
 
-    // extend AM configuration with component resource
-    MapOperations amConfiguration = resolvedInstance
-      .getAppConfOperations().getComponent(COMPONENT_AM);
-    // and patch configuration with prefix
-    if (amConfiguration != null) {
-      Map<String, String> sliderAppConfKeys = amConfiguration.prefixedWith("slider.");
-      for (Map.Entry<String, String> entry : sliderAppConfKeys.entrySet()) {
-        String k = entry.getKey();
-        String v = entry.getValue();
-        boolean exists = serviceConf.get(k) != null;
-        log.info("{} {} to {}", (exists ? "Overwriting" : "Setting"), k, v);
-        serviceConf.set(k, v);
-      }
-    }
-
-    securityConfiguration = new SecurityConfiguration(serviceConf, resolvedInstance, clustername);
     // obtain security state
-    securityEnabled = securityConfiguration.isSecurityEnabled();
     // set the global security flag for the instance definition
-    instanceDefinition.getAppConfOperations().set(KEY_SECURITY_ENABLED, securityEnabled);
-
-    // triggers resolution and snapshotting for agent
-    appState.setInitialInstanceDefinition(instanceDefinition);
 
-    File confDir = getLocalConfDir();
-    if (!confDir.exists() || !confDir.isDirectory()) {
-      log.info("Conf dir {} does not exist.", confDir);
-      File parentFile = confDir.getParentFile();
-      log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile));
-    }
-    
     //get our provider
-    MapOperations globalInternalOptions = getGlobalInternalOptions();
-    String providerType = globalInternalOptions.getMandatoryOption(
-      InternalKeys.INTERNAL_PROVIDER_NAME);
-    log.info("Cluster provider type is {}", providerType);
     SliderProviderFactory factory =
-      SliderProviderFactory.createSliderProviderFactory(providerType);
+      SliderProviderFactory.createSliderProviderFactory("docker");
     providerService = factory.createServerProvider();
     // init the provider BUT DO NOT START IT YET
     initAndAddService(providerService);
-    providerRMOperationHandler = new ProviderNotifyingOperationHandler(providerService);
-    
-    // create a slider AM provider
-    sliderAMProvider = new SliderAMProviderService();
-    initAndAddService(sliderAMProvider);
-    
+
     InetSocketAddress rmSchedulerAddress = SliderUtils.getRmSchedulerAddress(serviceConf);
     log.info("RM is at {}", rmSchedulerAddress);
     yarnRPC = YarnRPC.create(serviceConf);
@@ -689,10 +629,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     ApplicationId appid = appAttemptID.getApplicationId();
     log.info("AM for ID {}", appid.getId());
 
-    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString());
-    appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString());
-    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString());
-
     Map<String, String> envVars;
     List<Container> liveContainers;
 
@@ -731,28 +667,22 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       }
 
       //bring up the Slider RPC service
-      buildPortScanner(instanceDefinition);
-      startSliderRPCServer(instanceDefinition);
+      buildPortScanner();
+      startSliderRPCServer();
 
       rpcServiceAddress = rpcService.getConnectAddress();
       appMasterHostname = rpcServiceAddress.getAddress().getCanonicalHostName();
       appMasterRpcPort = rpcServiceAddress.getPort();
       appMasterTrackingUrl = null;
       log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort);
-      appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
-      appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);
 
       log.info("Starting Yarn registry");
       registryOperations = startRegistryOperationsService();
       log.info(registryOperations.toString());
 
       //build the role map
-      List<ProviderRole> providerRoles = new ArrayList<>(providerService.getRoles());
-      providerRoles.addAll(SliderAMClientProvider.ROLES);
-
+      List<ProviderRole> providerRoles = Collections.EMPTY_LIST;
       // Start up the WebApp and track the URL for it
-      MapOperations component = instanceDefinition.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM);
 
       // Web service endpoints: initialize
       WebAppApiImpl webAppApi =
@@ -760,9 +690,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
               stateForProviders,
               providerService, registryOperations,
               metricsAndMonitoring,
-              actionQueues,
-              this,
-              contentCache);
+              actionQueues);
       initAMFilterOptions(serviceConf);
 
       int webAppPort = deployWebApplication(webAppApi);
@@ -770,9 +698,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       String scheme = WebAppUtils.HTTP_PREFIX;
       appMasterTrackingUrl = scheme + appMasterHostname + ":" + webAppPort;
 
-      appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
-      appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webAppPort);
-
       // *****************************************************
       // Register self with ResourceManager
       // This will start heartbeating to the RM
@@ -785,6 +710,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
                                    appMasterTrackingUrl);
       maximumResourceCapability = amRegistrationData.getMaximumResourceCapability();
 
+      //TODO should not read local configs !!!
       int minMemory = serviceConf.getInt(RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
           DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        // validate scheduler vcores allocation setting
@@ -798,11 +724,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       // the max value as part of its lookup
       rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maximumResourceCapability);
 
-      // set the RM-defined maximum cluster values
-      appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(maxCores));
-      appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(maxMemory));
-
-      processAMCredentials(securityConfiguration);
+//      processAMCredentials(securityConfiguration);
 
       if (securityEnabled) {
         secretManager.setMasterKey(
@@ -817,7 +739,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
           // principal.  Can do so now since AM registration with RM above required
           // tokens associated to principal
           String principal = securityConfiguration.getPrincipal();
-          File localKeytabFile = securityConfiguration.getKeytabFile(instanceDefinition);
+          //TODO read key tab file from slider-am.xml
+          File localKeytabFile =
+              securityConfiguration.getKeytabFile(new AggregateConf());
           // Now log in...
           login(principal, localKeytabFile);
           // obtain new FS reference that should be kerberos based and different
@@ -829,10 +753,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       // YARN client.
       // Important: this is only valid at startup, and must be executed within
       // the right UGI context. Use with care.
-      SliderYarnClientImpl yarnClient = null;
+      YarnClient yarnClient = null;
       List<NodeReport> nodeReports;
       try {
-        yarnClient = new SliderYarnClientImpl();
+        yarnClient = YarnClient.createYarnClient();
         yarnClient.init(getConfig());
         yarnClient.start();
         nodeReports = getNodeReports(yarnClient);
@@ -856,45 +780,23 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       // extract container list
 
       liveContainers = amRegistrationData.getContainersFromPreviousAttempts();
-
-      //now validate the installation
-      Configuration providerConf =
-        providerService.loadProviderConfigurationInformation(confDir);
-
-      providerService.initializeApplicationConfiguration(instanceDefinition,
-          fs, null);
-
-      providerService.validateApplicationConfiguration(instanceDefinition,
-          confDir,
-          securityEnabled);
+      DefaultMetricsSystem.initialize("SliderAppMaster");
 
       //determine the location for the role history data
-      Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);
+      Path historyDir = new Path(appDir, HISTORY_DIR_NAME);
 
       //build the instance
       AppStateBindingInfo binding = new AppStateBindingInfo();
-      binding.instanceDefinition = instanceDefinition;
       binding.serviceConfig = serviceConf;
-      binding.publishedProviderConf = providerConf;
       binding.roles = providerRoles;
       binding.fs = fs.getFileSystem();
       binding.historyPath = historyDir;
       binding.liveContainers = liveContainers;
-      binding.applicationInfo = appInformation;
-      binding.releaseSelector = providerService.createContainerReleaseSelector();
+      binding.releaseSelector =  new MostRecentContainerReleaseSelector();
       binding.nodeReports = nodeReports;
+      binding.application = application;
       appState.buildInstance(binding);
 
-      providerService.rebuildContainerDetails(liveContainers,
-          instanceDefinition.getName(), appState.getRolePriorityMap());
-
-      // add the AM to the list of nodes in the cluster
-
-      appState.buildAppMasterNode(appMasterContainerID,
-          appMasterHostname,
-          webAppPort,
-          appMasterHostname + ":" + webAppPort);
-
       // build up environment variables that the AM wants set in every container
       // irrespective of provider and role.
       envVars = new HashMap<>();
@@ -908,8 +810,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     }
     String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";
 
-    String amTmpDir = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_AM_TMP_DIR);
-
+    String amTmpDir = "/tmp";
+    //TODO read tmpDir from slider-am.xml
     Path tmpDirPath = new Path(amTmpDir);
     Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
     fs.getFileSystem().mkdirs(launcherTmpDirPath);
@@ -917,29 +819,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     //launcher service
     launchService = new RoleLaunchService(actionQueues,
                                           providerService,
-                                          fs,
-                                          new Path(getGeneratedConfDir()),
-                                          envVars,
-                                          launcherTmpDirPath);
+                                          fs, envVars);
 
     deployChildService(launchService);
 
-    appState.noteAMLaunched();
-
-
     //Give the provider access to the state, and AM
-    providerService.bind(stateForProviders, actionQueues, liveContainers);
-    sliderAMProvider.bind(stateForProviders, actionQueues, liveContainers);
+    providerService.setAMState(stateForProviders);
 
     // chaos monkey
-    maybeStartMonkey();
-
-    // setup token renewal and expiry handling for long lived apps
-//    if (!securityConfiguration.isKeytabProvided() &&
-//        SliderUtils.isHadoopClusterSecure(getConfig())) {
-//      fsDelegationTokenManager = new FsDelegationTokenManager(actionQueues);
-//      fsDelegationTokenManager.acquireDelegationToken(getConfig());
-//    }
+//    maybeStartMonkey();
 
     // if not a secure cluster, extract the username -it will be
     // propagated to workers
@@ -955,25 +843,21 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     log.info("Application Master Initialization Completed");
     initCompleted.set(true);
 
-    scheduleFailureWindowResets(instanceDefinition.getResources());
-    scheduleEscalation(instanceDefinition.getInternal());
+    scheduleFailureWindowResets(application.getConfiguration());
+    scheduleEscalation(application.getConfiguration());
 
     try {
       // schedule YARN Registry registration
-      queue(new ActionRegisterServiceInstance(clustername, appid));
+      queue(new ActionRegisterServiceInstance(appName, appid, application));
 
       // log the YARN and web UIs
       log.info("RM Webapp address {}",
           serviceConf.get(YarnConfiguration.RM_WEBAPP_ADDRESS));
       log.info("Slider webapp address {} proxied at {}",
         appMasterTrackingUrl, appMasterProxiedUrl);
-
-      // Start the Slider AM provider
-      sliderAMProvider.start();
-
       // launch the real provider; this is expected to trigger a callback that
       // starts the node review process
-      launchProviderService(instanceDefinition, confDir);
+      launchProviderService();
 
       // start handling any scheduled events
 
@@ -1000,7 +884,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * @throws InterruptedException
    */
   private ApplicationAttemptReport getApplicationAttemptReport(
-    final SliderYarnClientImpl yarnClient)
+    final YarnClient yarnClient)
       throws YarnException, IOException, InterruptedException {
     Preconditions.checkNotNull(yarnClient, "Null Yarn client");
     ApplicationAttemptReport report;
@@ -1019,14 +903,14 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   }
 
   /**
-   * List the node reports: uses {@link SliderYarnClientImpl} as the login user
+   * List the node reports: uses {@link YarnClient} as the login user
    * @param yarnClient client to the RM
    * @return the node reports
    * @throws IOException
    * @throws YarnException
    * @throws InterruptedException
    */
-  private List<NodeReport> getNodeReports(final SliderYarnClientImpl yarnClient)
+  private List<NodeReport> getNodeReports(final YarnClient yarnClient)
     throws IOException, YarnException, InterruptedException {
     Preconditions.checkNotNull(yarnClient, "Null Yarn client");
     List<NodeReport> nodeReports;
@@ -1051,7 +935,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    *   Creates and starts the web application, and adds a
    *   <code>WebAppService</code> service under the AM, to ensure
    *   a managed web application shutdown.
-   * @param webAppApi web app API instance
+   * @param webAppApi web application API instance
    * @return port the web application is deployed on
    * @throws IOException general problems starting the webapp (network, etc)
    * @throws WebAppException other issues
@@ -1117,12 +1001,14 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   /**
    * Build up the port scanner. This may include setting a port range.
    */
-  private void buildPortScanner(AggregateConf instanceDefinition)
+  private void buildPortScanner()
       throws BadConfigException {
     portScanner = new PortScanner();
-    String portRange = instanceDefinition.
-        getAppConfOperations().getGlobalOptions().
-          getOption(SliderKeys.KEY_ALLOWED_PORT_RANGE, "0");
+    String portRange = "0";
+    //TODO read from slider-am.xml
+//    String portRange = instanceDefinition.
+//        getAppConfOperations().getGlobalOptions().
+//          getOption(SliderKeys.KEY_ALLOWED_PORT_RANGE, "0");
     if (!"0".equals(portRange)) {
         portScanner.setPortRange(portRange);
     }
@@ -1203,11 +1089,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * @throws IOException
    */
   public void registerServiceInstance(String instanceName,
-      ApplicationId appId) throws IOException {
-    
-    
-    // the registry is running, so register services
-    URL amWebURI = new URL(appMasterProxiedUrl);
+      ApplicationId appId, Application application) throws IOException {
 
     //Give the provider restricted access to the state, registry
     setupInitialRegistryPaths();
@@ -1218,7 +1100,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         instanceName,
         appAttemptID);
     providerService.bindToYarnRegistry(yarnRegistryOperations);
-    sliderAMProvider.bindToYarnRegistry(yarnRegistryOperations);
 
     // Yarn registry
     ServiceRecord serviceRecord = new ServiceRecord();
@@ -1231,19 +1112,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         RegistryTypeUtils.ipcEndpoint(
             CustomRegistryConstants.AM_IPC_PROTOCOL,
             rpcServiceAddress));
-            
-    // internal services
-    sliderAMProvider.applyInitialRegistryDefinitions(amWebURI,
-        serviceRecord);
-
-    // provider service dynamic definitions.
-    providerService.applyInitialRegistryDefinitions(amWebURI, serviceRecord);
-
 
     // set any provided attributes
-    setProvidedServiceRecordAttributes(
-        getInstanceDefinition().getAppConfOperations().getComponent(
-            SliderKeys.COMPONENT_AM), serviceRecord);
+    setUserProvidedServiceRecordAttributes(application.getConfiguration(),
+        serviceRecord);
 
     // register the service's entry
     log.info("Service Record \n{}", serviceRecord);
@@ -1276,7 +1148,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   /**
    * Handler for {@link RegisterComponentInstance action}
-   * Register/re-register an ephemeral container that is already in the app state
+   * Register/re-register an ephemeral container that is already in the application state
    * @param id the component
    * @param description component description
    * @param type component type
@@ -1291,32 +1163,36 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     // this is where component registrations  go
     log.info("Registering component {}", id);
     String cid = RegistryPathUtils.encodeYarnID(id.toString());
-    ServiceRecord container = new ServiceRecord();
-    container.set(YarnRegistryAttributes.YARN_ID, cid);
-    container.description = description;
-    container.set(YarnRegistryAttributes.YARN_PERSISTENCE,
+    ServiceRecord record = new ServiceRecord();
+    record.set(YarnRegistryAttributes.YARN_ID, cid);
+    record.description = description;
+    record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
         PersistencePolicies.CONTAINER);
-    MapOperations compOps = getInstanceDefinition().getAppConfOperations().
-        getComponent(type);
-    setProvidedServiceRecordAttributes(compOps, container);
+    setUserProvidedServiceRecordAttributes(
+        instance.providerRole.component.getConfiguration(), record);
     try {
-      yarnRegistryOperations.putComponent(cid, container);
+      yarnRegistryOperations.putComponent(cid, record);
     } catch (IOException e) {
       log.warn("Failed to register container {}/{}: {}",
           id, description, e, e);
       return false;
     }
+    org.apache.slider.api.resource.Container container =
+        new org.apache.slider.api.resource.Container();
+    container.setId(id.toString());
+    container.setLaunchTime(new Date());
+    container.setState(org.apache.slider.api.resource.ContainerState.INIT);
+    container.setBareHost(instance.host);
+    instance.providerRole.component.addContainer(container);
     return true;
   }
 
-  protected void setProvidedServiceRecordAttributes(MapOperations ops,
-                                                  ServiceRecord record) {
+  protected void setUserProvidedServiceRecordAttributes(
+      org.apache.slider.api.resource.Configuration conf, ServiceRecord record) {
     String prefix = RoleKeys.SERVICE_RECORD_ATTRIBUTE_PREFIX;
-    for (Map.Entry<String, String> entry : ops.entrySet()) {
-      if (entry.getKey().startsWith(
-          prefix)) {
-        String key = entry.getKey().substring(
-            prefix.length() + 1);
+    for (Map.Entry<String, String> entry : conf.getProperties().entrySet()) {
+      if (entry.getKey().startsWith(prefix)) {
+        String key = entry.getKey().substring(prefix.length() + 1);
         record.set(key, entry.getValue().trim());
       }
     }
@@ -1366,35 +1242,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   }
 
   /**
-   * Build the configuration directory passed in or of the target FS
-   * @return the file
-   */
-  public File getLocalConfDir() {
-    File confdir =
-      new File(SliderKeys.PROPAGATED_CONF_DIR_NAME).getAbsoluteFile();
-    return confdir;
-  }
-
-  /**
-   * Get the path to the DFS configuration that is defined in the cluster specification 
-   * @return the generated configuration dir
-   */
-  public String getGeneratedConfDir() {
-    return getGlobalInternalOptions().get(
-        InternalKeys.INTERNAL_GENERATED_CONF_PATH);
-  }
-
-  /**
-   * Get the global internal options for the AM
-   * @return a map to access the internals
-   */
-  public MapOperations getGlobalInternalOptions() {
-    return getInstanceDefinition()
-      .getInternalOperations().
-      getGlobalOptions();
-  }
-
-  /**
    * Get the filesystem of this cluster
    * @return the FS of the config
    */
@@ -1480,11 +1327,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     Exception exception = stopAction.getEx();
 
     appStatus = stopAction.getFinalApplicationStatus();
-    if (!spawnedProcessExitedBeforeShutdownTriggered) {
-      //stopped the forked process but don't worry about its exit code
-      int forkedExitCode = stopForkedProcess();
-      log.debug("Stopped forked process: exit code={}", forkedExitCode);
-    }
 
     // make sure the AM is actually registered. If not, there's no point
     // trying to unregister it
@@ -1500,7 +1342,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     launchService.stop();
 
     //now release all containers
-    releaseAllContainers();
+    releaseAllContainers(application);
+    DefaultMetricsSystem.shutdown();
 
     // When the application completes, it should send a finish application
     // signal to the RM
@@ -1536,7 +1379,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   /**
    * Start the slider RPC server
    */
-  private void startSliderRPCServer(AggregateConf instanceDefinition)
+  private void startSliderRPCServer()
       throws IOException, SliderException {
     verifyIPCAccess();
 
@@ -1612,16 +1455,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     //for each assignment: instantiate that role
     for (ContainerAssignment assignment : assignments) {
-      try {
-        launchService.launchRole(assignment, getInstanceDefinition(),
-            buildContainerCredentials());
-      } catch (IOException e) {
-        // Can be caused by failure to renew credentials with the remote
-        // service. If so, don't launch the application. Container is retained,
-        // though YARN will take it away after a timeout.
-        log.error("Failed to build credentials to launch container: {}", e, e);
-
-      }
+      //TODO Do we need to pass credentials to containers?
+      launchService.launchRole(assignment, application, null);
     }
 
     //for all the operations, exec them
@@ -1645,7 +1480,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
       // non complete containers should not be here
       assert (status.getState() == ContainerState.COMPLETE);
-      AppState.NodeCompletionResult result = appState.onCompletedNode(status);
+      AppState.NodeCompletionResult result = appState.onCompletedContainer(status);
       if (result.containerFailed) {
         RoleInstance ri = result.roleInstance;
         log.error("Role instance {} failed ", ri);
@@ -1653,7 +1488,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
       //  known nodes trigger notifications
       if(!result.unknownNode) {
-        getProviderService().notifyContainerCompleted(containerId);
         queue(new UnregisterComponentInstance(containerId, 0,
             TimeUnit.MILLISECONDS));
       }
@@ -1724,22 +1558,14 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * Implementation of cluster flexing.
    * It should be the only way that anything -even the AM itself on startup-
    * asks for nodes. 
-   * @param resources the resource tree
    * @throws SliderException slider problems, including invalid configs
    * @throws IOException IO problems
    */
-  public void flexCluster(ConfTree resources)
+  public void flexCluster(Messages.FlexComponentRequestProto request)
       throws IOException, SliderException {
-
-    AggregateConf newConf =
-        new AggregateConf(appState.getInstanceDefinitionSnapshot());
-    newConf.setResources(resources);
-    // verify the new definition is valid
-    sliderAMProvider.validateInstanceDefinition(newConf);
-    providerService.validateInstanceDefinition(newConf);
-
-    appState.updateResourceDefinitions(resources);
-
+    if (request != null) {
+      appState.updateComponents(request);
+    }
     // reset the scheduled windows...the values
     // may have changed
     appState.resetFailureCounts();
@@ -1750,24 +1576,37 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   /**
    * Schedule the failure window
-   * @param resources the resource tree
    * @throws BadConfigException if the window is out of range
    */
-  private void scheduleFailureWindowResets(ConfTree resources) throws
-      BadConfigException {
+  private void scheduleFailureWindowResets(
+      org.apache.slider.api.resource.Configuration conf) {
+
     ResetFailureWindow reset = new ResetFailureWindow(rmOperationHandler);
-    ConfTreeOperations ops = new ConfTreeOperations(resources);
-    MapOperations globals = ops.getGlobalOptions();
-    long seconds = globals.getTimeRange(ResourceKeys.CONTAINER_FAILURE_WINDOW,
-        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS,
-        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS,
-        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES, 0);
-    if (seconds > 0) {
-      log.info(
-          "Scheduling the failure window reset interval to every {} seconds",
-          seconds);
-      RenewingAction<ResetFailureWindow> renew = new RenewingAction<>(
-          reset, seconds, seconds, TimeUnit.SECONDS, 0);
+
+    long days =
+        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".days",
+            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS);
+    long hours =
+        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".hours",
+            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS);
+    long minutes =
+        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".minutes",
+            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES);
+    long seconds =
+        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".seconds",
+            0);
+    Preconditions
+        .checkState(days >= 0 && hours >= 0 && minutes >= 0 && seconds >= 0,
+            "Time range has negative time component %s:%s:%s:%s", days,
+            hours, minutes, seconds);
+    long totalMinutes = days * 24 * 60 + hours * 60 + minutes;
+    long totalSeconds = totalMinutes * 60 + seconds;
+    if (totalSeconds > 0) {
+      log.info("Scheduling the failure window reset interval to every {}"
+              + " seconds", totalSeconds);
+      RenewingAction<ResetFailureWindow> renew =
+          new RenewingAction<>(reset, totalSeconds, totalSeconds,
+              TimeUnit.SECONDS, 0);
       actionQueues.renewing("failures", renew);
     } else {
       log.info("Failure window reset interval is not set");
@@ -1776,16 +1615,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   /**
    * Schedule the escalation action
-   * @param internal
    * @throws BadConfigException
    */
-  private void scheduleEscalation(ConfTree internal) throws BadConfigException {
+  private void scheduleEscalation(
+      org.apache.slider.api.resource.Configuration conf) {
     EscalateOutstandingRequests escalate = new EscalateOutstandingRequests();
-    ConfTreeOperations ops = new ConfTreeOperations(internal);
-    int seconds = ops.getGlobalOptions().getOptionInt(InternalKeys.ESCALATION_CHECK_INTERVAL,
+    long seconds = conf.getPropertyLong(InternalKeys.ESCALATION_CHECK_INTERVAL,
         InternalKeys.DEFAULT_ESCALATION_CHECK_INTERVAL);
-    RenewingAction<EscalateOutstandingRequests> renew = new RenewingAction<>(
-        escalate, seconds, seconds, TimeUnit.SECONDS, 0);
+    RenewingAction<EscalateOutstandingRequests> renew =
+        new RenewingAction<>(escalate, seconds, seconds, TimeUnit.SECONDS, 0);
     actionQueues.renewing("escalation", renew);
   }
   
@@ -1794,7 +1632,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * @param reason reason for operation
    */
   private synchronized void reviewRequestAndReleaseNodes(String reason) {
-    log.debug("reviewRequestAndReleaseNodes({})", reason);
+    log.info("reviewRequestAndReleaseNodes({})", reason);
     queue(new ReviewAndFlexApplicationSize(reason, 0, TimeUnit.SECONDS));
   }
 
@@ -1810,6 +1648,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     if ( actionQueues.hasQueuedActionWithAttribute(
         AsyncAction.ATTR_REVIEWS_APP_SIZE | AsyncAction.ATTR_HALTS_APP)) {
+      //TODO Loop all actions to check duplicate ??
       // this operation isn't needed at all -existing duplicate or shutdown due
       return;
     }
@@ -1829,14 +1668,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   public synchronized void executeNodeReview(String reason)
       throws SliderInternalStateException {
     
-    log.debug("in executeNodeReview({})", reason);
+    log.info("in executeNodeReview({})", reason);
     if (amCompletionFlag.get()) {
       log.info("Ignoring node review operation: shutdown in progress");
     }
     try {
       List<AbstractRMOperation> allOperations = appState.reviewRequestAndReleaseNodes();
-      // tell the provider
-      providerRMOperationHandler.execute(allOperations);
       //now apply the operations
       execute(allOperations);
     } catch (TriggerClusterTeardownException e) {
@@ -1853,7 +1690,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   public void escalateOutstandingRequests() {
     List<AbstractRMOperation> operations = appState.escalateOutstandingRequests();
-    providerRMOperationHandler.execute(operations);
     execute(operations);
   }
 
@@ -1861,11 +1697,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   /**
    * Shutdown operation: release all containers
    */
-  private void releaseAllContainers() {
+  private void releaseAllContainers(Application application) {
     // Add the sleep here (before releasing containers) so that applications get
     // time to perform graceful shutdown
     try {
-      long timeout = getContainerReleaseTimeout();
+      long timeout = getContainerReleaseTimeout(application);
       if (timeout > 0) {
         Thread.sleep(timeout);
       }
@@ -1873,22 +1709,16 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       log.info("Sleep for container release interrupted");
     } finally {
       List<AbstractRMOperation> operations = appState.releaseAllContainers();
-      providerRMOperationHandler.execute(operations);
       // now apply the operations
       execute(operations);
     }
   }
 
-  private long getContainerReleaseTimeout() {
+  private long getContainerReleaseTimeout(Application application) {
     // Get container release timeout in millis or 0 if the property is not set.
-    // If non-zero then add the agent heartbeat delay time, since it can take up
-    // to that much time for agents to receive the stop command.
-    int timeout = getInstanceDefinition().getAppConfOperations()
-        .getGlobalOptions()
-        .getOptionInt(SliderKeys.APP_CONTAINER_RELEASE_TIMEOUT, 0);
-    if (timeout > 0) {
-      timeout += SliderKeys.APP_CONTAINER_HEARTBEAT_INTERVAL_SEC;
-    }
+    long timeout = application.getConfiguration()
+        .getPropertyLong(SliderKeys.APP_CONTAINER_RELEASE_TIMEOUT, 0);
+
     // convert to millis
     long timeoutInMillis = timeout * 1000l;
     log.info("Container release timeout in millis = {}", timeoutInMillis);
@@ -2000,27 +1830,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   /**
    * Launch the provider service
-   *
-   * @param instanceDefinition definition of the service
-   * @param confDir directory of config data
    * @throws IOException
    * @throws SliderException
    */
-  protected synchronized void launchProviderService(AggregateConf instanceDefinition,
-                                                    File confDir)
-    throws IOException, SliderException {
-    Map<String, String> env = new HashMap<>();
-    boolean execStarted = providerService.exec(instanceDefinition, confDir, env,
-        this);
-    if (execStarted) {
-      providerService.registerServiceListener(this);
-      providerService.start();
-    } else {
-      // didn't start, so don't register
-      providerService.start();
-      // and send the started event ourselves
-      eventCallbackEvent(null);
-    }
+  protected synchronized void launchProviderService()
+      throws IOException, SliderException {
+    // didn't start, so don't register
+    providerService.start();
+    // and send the started event ourselves
+    eventCallbackEvent(null);
   }
 
   /* =================================================================== */
@@ -2029,11 +1847,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
   @Override // ProviderCompleted
   public void eventCallbackEvent(Object parameter) {
-    // signalled that the child process is up.
-    appState.noteAMLive();
     // now ask for the cluster nodes
     try {
-      flexCluster(getInstanceDefinition().getResources());
+      flexCluster(null);
     } catch (Exception e) {
       // cluster flex failure: log
       log.error("Failed to flex cluster nodes: {}", e, e);
@@ -2064,62 +1880,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     }
   }
 
-  /* =================================================================== */
-  /* ServiceStateChangeListener */
-  /* =================================================================== */
-
-  /**
-   * Received on listening service termination.
-   * @param service the service that has changed.
-   */
-  @Override //ServiceStateChangeListener
-  public void stateChanged(Service service) {
-    if (service == providerService && service.isInState(STATE.STOPPED)) {
-      //its the current master process in play
-      int exitCode = providerService.getExitCode();
-      int mappedProcessExitCode = exitCode;
-
-      boolean shouldTriggerFailure = !amCompletionFlag.get()
-         && (mappedProcessExitCode != 0);
-
-      if (shouldTriggerFailure) {
-        String reason =
-            "Spawned process failed with raw " + exitCode + " mapped to " +
-            mappedProcessExitCode;
-        ActionStopSlider stop = new ActionStopSlider("stop",
-            mappedProcessExitCode,
-            FinalApplicationStatus.FAILED,
-            reason);
-        //this wasn't expected: the process finished early
-        spawnedProcessExitedBeforeShutdownTriggered = true;
-        log.info(
-          "Process has exited with exit code {} mapped to {} -triggering termination",
-          exitCode,
-          mappedProcessExitCode);
-
-        //tell the AM the cluster is complete 
-        signalAMComplete(stop);
-      } else {
-        //we don't care
-        log.info(
-          "Process has exited with exit code {} mapped to {} -ignoring",
-          exitCode,
-          mappedProcessExitCode);
-      }
-    } else {
-      super.stateChanged(service);
-    }
-  }
-
-  /**
-   * stop forked process if it the running process var is not null
-   * @return the process exit code
-   */
-  protected synchronized Integer stopForkedProcess() {
-    providerService.stop();
-    return providerService.getExitCode();
-  }
-
   /**
    *  Async start container request
    * @param container container
@@ -2221,16 +1981,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     LOG_YARN.warn("Failed to stop Container {}", containerId);
   }
 
-  public AggregateConf getInstanceDefinition() {
-    return appState.getInstanceDefinition();
-  }
-
-  /**
-   * This is the status, the live model
-   */
-  public ClusterDescription getClusterDescription() {
-    return appState.getClusterStatus();
-  }
 
   public ProviderService getProviderService() {
     return providerService;
@@ -2278,12 +2028,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   }
 
   /**
-   * Start the chaos monkey
+   * TODO Start the chaos monkey
    * @return true if it started
    */
   private boolean maybeStartMonkey() {
-    MapOperations internals = getGlobalInternalOptions();
-
+//    MapOperations internals = getGlobalInternalOptions();
+    MapOperations internals = new MapOperations();
     Boolean enabled =
         internals.getOptionBool(InternalKeys.CHAOS_MONKEY_ENABLED,
             InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
index 6b61681..a660958 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
@@ -18,6 +18,7 @@
 
 package org.apache.slider.server.appmaster.actions;
 
+import org.apache.slider.api.proto.Messages;
 import org.apache.slider.core.conf.ConfTree;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.state.AppState;
@@ -26,19 +27,16 @@ import java.util.concurrent.TimeUnit;
 
 public class ActionFlexCluster extends AsyncAction {
 
-  public final ConfTree resources;
-  
-  public ActionFlexCluster(String name,
-      long delay,
-      TimeUnit timeUnit, ConfTree resources) {
+  final Messages.FlexComponentRequestProto requestProto;
+  public ActionFlexCluster(String name, long delay, TimeUnit timeUnit,
+      Messages.FlexComponentRequestProto requestProto) {
     super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
-    this.resources = resources;
+    this.requestProto = requestProto;
   }
-
   @Override
   public void execute(SliderAppMaster appMaster,
       QueueAccess queueService,
       AppState appState) throws Exception {
-    appMaster.flexCluster(resources);
+    appMaster.flexCluster(requestProto);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionRegisterServiceInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionRegisterServiceInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionRegisterServiceInstance.java
index ca330af..0d7f7d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionRegisterServiceInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionRegisterServiceInstance.java
@@ -19,6 +19,7 @@
 package org.apache.slider.server.appmaster.actions;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.state.AppState;
 
@@ -31,21 +32,13 @@ public class ActionRegisterServiceInstance extends AsyncAction {
 
   private final String instanceName;
   private final ApplicationId appId;
-
+  private final Application application;
   public ActionRegisterServiceInstance(String instanceName,
-      ApplicationId appId) {
+      ApplicationId appId, Application application) {
     super("ActionRegisterServiceInstance");
     this.instanceName = instanceName;
     this.appId = appId;
-  }
-
-  public ActionRegisterServiceInstance(String instanceName,
-      ApplicationId appId,
-      long delay,
-      TimeUnit timeUnit) {
-    super("ActionRegisterServiceInstance", delay, timeUnit);
-    this.instanceName = instanceName;
-    this.appId = appId;
+    this.application = application;
   }
 
   @Override
@@ -54,6 +47,6 @@ public class ActionRegisterServiceInstance extends AsyncAction {
       AppState appState) throws Exception {
 
     // YARN Registry do the registration
-    appMaster.registerServiceInstance(instanceName, appId);
+    appMaster.registerServiceInstance(instanceName, appId, application);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
new file mode 100644
index 0000000..510ff73
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.metrics;
+
+import com.codahale.metrics.Counter;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+@Metrics(context = "yarn-native-service")
+public class SliderMetrics implements MetricsSource {
+
+  @Metric("containers pending")
+  public MutableGaugeInt containersPending;
+  @Metric("anti-affinity containers pending")
+  public MutableGaugeInt pendingAAContainers;
+  @Metric("containers running")
+  public MutableGaugeInt containersRunning;
+  @Metric("containers requested")
+  public MutableGaugeInt containersDesired;
+  @Metric("containers completed")
+  public MutableGaugeInt containersCompleted;
+  @Metric("containers failed")
+  public MutableGaugeInt containersFailed;
+  @Metric("containers failed since last threshold")
+  public MutableGaugeInt failedSinceLastThreshold;
+  @Metric("containers preempted")
+  public MutableGaugeInt containersPreempted;
+  @Metric("containers surplus")
+  public MutableGaugeInt surplusContainers;
+
+  protected final MetricsRegistry registry;
+
+  public SliderMetrics(MetricsInfo metricsInfo) {
+    registry = new MetricsRegistry(metricsInfo);
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    registry.snapshot(collector.addRecord(registry.info()), all);
+  }
+
+  public static SliderMetrics register(String name, String description) {
+    SliderMetrics metrics = new SliderMetrics(info(name, description));
+    DefaultMetricsSystem.instance().register(name, description, metrics);
+    return metrics;
+  }
+
+  public void tag(String name, String description, String value) {
+    registry.tag(name, description, value);
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/operations/ProviderNotifyingOperationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/operations/ProviderNotifyingOperationHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/operations/ProviderNotifyingOperationHandler.java
deleted file mode 100644
index 972cc30..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/operations/ProviderNotifyingOperationHandler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.operations;
-
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.client.api.AMRMClient;
-import org.apache.slider.providers.ProviderService;
-
-import java.util.List;
-
-public class ProviderNotifyingOperationHandler extends RMOperationHandler {
-  
-  private final ProviderService providerService;
-
-  public ProviderNotifyingOperationHandler(ProviderService providerService) {
-    this.providerService = providerService;
-  }
-
-  @Override
-  public void releaseAssignedContainer(ContainerId containerId) {
-    providerService.releaseAssignedContainer(containerId);
-  }
-
-  @Override
-  public void addContainerRequest(AMRMClient.ContainerRequest req) {
-    providerService.addContainerRequest(req);
-  }
-
-  @Override
-  public int cancelContainerRequests(Priority priority1,
-      Priority priority2,
-      int count) {
-    return providerService.cancelContainerRequests(priority1, priority2, count);
-  }
-
-  @Override
-  public void cancelSingleRequest(AMRMClient.ContainerRequest request) {
-    providerService.cancelSingleRequest(request);
-  }
-
-  @Override
-  public void updateBlacklist(List<String> blacklistAdditions,
-      List<String> blacklistRemovals) {
-    providerService.updateBlacklist(blacklistAdditions, blacklistRemovals);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
index fbd408e..4d483c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
@@ -70,11 +70,12 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
   }
 
   @Override
-  public Messages.FlexClusterResponseProto flexCluster(RpcController controller,
-      Messages.FlexClusterRequestProto request) throws ServiceException {
+  public Messages.FlexComponentResponseProto flexComponent(
+      RpcController controller, Messages.FlexComponentRequestProto request)
+      throws ServiceException {
     try {
-      return real.flexCluster(request);
-    } catch (Exception e) {
+      return real.flexComponent(request);
+    } catch (IOException e) {
       throw wrap(e);
     }
   }
@@ -90,19 +91,6 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
     }
   }
 
-
-  @Override
-  public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
-    RpcController controller,
-    Messages.GetInstanceDefinitionRequestProto request)
-      throws ServiceException {
-    try {
-      return real.getInstanceDefinition(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
   @Override
   public Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(
     RpcController controller,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
index 448c6f3..c60d609 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
@@ -110,10 +110,10 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
   }
 
   @Override
-  public Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
-      throws IOException {
+  public Messages.FlexComponentResponseProto flexComponent(
+      Messages.FlexComponentRequestProto request) throws IOException {
     try {
-      return endpoint.flexCluster(NULL_CONTROLLER, request);
+      return endpoint.flexComponent(NULL_CONTROLLER, request);
     } catch (ServiceException e) {
       throw convert(e);
     }
@@ -131,19 +131,6 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
     }
   }
 
-
-  @Override
-  public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
-    Messages.GetInstanceDefinitionRequestProto request) throws
-                                                        IOException,
-                                                        YarnException {
-    try {
-      return endpoint.getInstanceDefinition(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
   @Override
   public Messages.ListNodeUUIDsByRoleResponseProto listNodeUUIDsByRole(Messages.ListNodeUUIDsByRoleRequestProto request) throws
                                                                                                                          IOException,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
index 70c2f05..344495b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
@@ -24,9 +24,9 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.ContainerInformation;
@@ -38,6 +38,7 @@ import org.apache.slider.core.exceptions.ServiceNotReadyException;
 import org.apache.slider.core.main.LauncherExitCodes;
 import org.apache.slider.core.persist.AggregateConfSerDeser;
 import org.apache.slider.core.persist.ConfTreeSerDeser;
+import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.server.appmaster.AppMasterActionOperations;
 import org.apache.slider.server.appmaster.actions.ActionFlexCluster;
 import org.apache.slider.server.appmaster.actions.ActionHalt;
@@ -78,6 +79,9 @@ public class SliderIPCService extends AbstractService
   private final MetricsAndMonitoring metricsAndMonitoring;
   private final AppMasterActionOperations amOperations;
   private final ContentCache cache;
+  private static final JsonSerDeser<Application> jsonSerDeser =
+      new JsonSerDeser<Application>(Application.class);
+
 
   /**
    * This is the prefix used for metrics
@@ -195,17 +199,12 @@ public class SliderIPCService extends AbstractService
     return Messages.UpgradeContainersResponseProto.getDefaultInstance();
   }
 
-  @Override //SliderClusterProtocol
-  public Messages.FlexClusterResponseProto flexCluster(Messages.FlexClusterRequestProto request)
-      throws IOException {
+  @Override
+  public Messages.FlexComponentResponseProto flexComponent(
+      Messages.FlexComponentRequestProto request) throws IOException {
     onRpcCall("flex");
-    String payload = request.getClusterSpec();
-    ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser();
-    ConfTree updatedResources = confTreeSerDeser.fromJson(payload);
-    schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS,
-        updatedResources));
-    return Messages.FlexClusterResponseProto.newBuilder().setResponse(
-        true).build();
+    schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));
+    return Messages.FlexComponentResponseProto.newBuilder().build();
   }
 
   @Override //SliderClusterProtocol
@@ -216,38 +215,10 @@ public class SliderIPCService extends AbstractService
     String result;
     //quick update
     //query and json-ify
-    ClusterDescription cd = state.refreshClusterStatus();
-    result = cd.toJsonString();
-    String stat = result;
+    Application application = state.refreshClusterStatus();
+    String stat = jsonSerDeser.toJson(application);
     return Messages.GetJSONClusterStatusResponseProto.newBuilder()
-                                                     .setClusterSpec(stat)
-                                                     .build();
-  }
-
-  @Override
-  public Messages.GetInstanceDefinitionResponseProto getInstanceDefinition(
-      Messages.GetInstanceDefinitionRequestProto request)
-      throws IOException, YarnException {
-
-    onRpcCall("getinstancedefinition");
-    String internal;
-    String resources;
-    String app;
-    AggregateConf instanceDefinition =
-        state.getInstanceDefinitionSnapshot();
-    internal = instanceDefinition.getInternal().toJson();
-    resources = instanceDefinition.getResources().toJson();
-    app = instanceDefinition.getAppConf().toJson();
-    assert internal != null;
-    assert resources != null;
-    assert app != null;
-    log.debug("Generating getInstanceDefinition Response");
-    Messages.GetInstanceDefinitionResponseProto.Builder builder =
-        Messages.GetInstanceDefinitionResponseProto.newBuilder();
-    builder.setInternal(internal);
-    builder.setResources(resources);
-    builder.setApplication(app);
-    return builder.build();
+        .setClusterSpec(stat).build();
   }
 
   @Override //SliderClusterProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
index 9a89c39..b31babc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
@@ -138,6 +138,7 @@ public class SecurityConfiguration {
 
   public File getKeytabFile(AggregateConf instanceDefinition)
       throws SliderException, IOException {
+    //TODO implement this for dash semantic
     String keytabFullPath = instanceDefinition.getAppConfOperations()
         .getComponent(SliderKeys.COMPONENT_AM)
         .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/50] [abbrv] hadoop git commit: YARN-6405. Improve configuring services through REST API. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6405. Improve configuring services through REST API. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ace79d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ace79d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ace79d7

Branch: refs/heads/yarn-native-services
Commit: 2ace79d73426e007c88e9cb9e7acf913d6ca807b
Parents: 256a159
Author: Billie Rinaldi <bi...@apache.org>
Authored: Wed Apr 26 08:44:38 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |   4 +-
 .../api/impl/TestApplicationApiService.java     |  26 +-
 .../apache/slider/api/ServiceApiConstants.java  |  50 +++
 .../apache/slider/api/resource/ConfigFile.java  |  58 +--
 .../slider/api/resource/Configuration.java      |  39 +-
 .../org/apache/slider/client/SliderClient.java  |  16 +-
 .../org/apache/slider/common/SliderKeys.java    |   4 +-
 .../slider/core/launch/CommandLineBuilder.java  |  15 -
 .../docstore/ConfigurationResolver.java         |  24 --
 .../apache/slider/providers/ProviderRole.java   |  32 +-
 .../slider/providers/ProviderService.java       |   4 +-
 .../apache/slider/providers/ProviderUtils.java  | 279 +++++++++-----
 .../providers/docker/DockerProviderService.java |  50 +--
 .../server/appmaster/RoleLaunchService.java     |  24 +-
 .../server/appmaster/SliderAppMaster.java       |  77 ++--
 .../actions/RegisterComponentInstance.java      |  12 +-
 .../actions/UnregisterComponentInstance.java    |  16 +-
 .../server/appmaster/metrics/SliderMetrics.java |  23 ++
 .../appmaster/monkey/ChaosKillContainer.java    |   2 +-
 .../server/appmaster/rpc/SliderIPCService.java  |   2 +-
 .../slider/server/appmaster/state/AppState.java | 384 ++++++++++---------
 .../server/appmaster/state/RoleInstance.java    |  38 +-
 .../server/appmaster/state/RoleStatus.java      |   1 +
 .../state/StateAccessForProviders.java          |   1 +
 .../slider/util/RestApiErrorMessages.java       |   2 +-
 .../org/apache/slider/util/ServiceApiUtil.java  |  77 +++-
 .../TestMockAppStateDynamicHistory.java         |   8 +-
 .../TestMockAppStateFlexDynamicRoles.java       |   6 +-
 .../appstate/TestMockAppStateUniqueNames.java   |  77 +++-
 .../TestMockContainerResourceAllocations.java   |  11 +
 .../model/mock/BaseMockAppStateTest.java        |   9 +-
 .../model/mock/MockProviderService.java         |   4 +-
 32 files changed, 850 insertions(+), 525 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index e9239e4..82cc30f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -347,10 +347,10 @@ definitions:
         description: The absolute path that this configuration file should be mounted as, in the application container.
       src_file:
         type: string
-        description: Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.
+        description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.
       props:
         type: object
-        description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.
+        description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content is dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.
   Container:
     description: An instance of a running application container.
     properties:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
index abad34e..6e077d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
@@ -63,7 +63,7 @@ public class TestApplicationApiService {
 
     // no name
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX + "application with no name");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
@@ -74,7 +74,7 @@ public class TestApplicationApiService {
     for (String badName : badNames) {
       app.setName(badName);
       try {
-        ServiceApiUtil.validateApplicationPostPayload(app);
+        ServiceApiUtil.validateApplicationPayload(app, null);
         Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
       } catch (IllegalArgumentException e) {
         Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID_FORMAT,
@@ -85,7 +85,7 @@ public class TestApplicationApiService {
     // no artifact
     app.setName("finance_home");
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_ARTIFACT_INVALID, e.getMessage());
@@ -95,7 +95,7 @@ public class TestApplicationApiService {
     Artifact artifact = new Artifact();
     app.setArtifact(artifact);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
@@ -106,7 +106,7 @@ public class TestApplicationApiService {
     artifact.setId("app.io/hbase:facebook_0.2");
     app.setNumberOfContainers(5l);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
     } catch (IllegalArgumentException e) {
       logger.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
@@ -128,7 +128,7 @@ public class TestApplicationApiService {
     // resource not specified
     artifact.setId("docker.io/centos:centos7");
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX + "application with no resource");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_RESOURCE_INVALID, e.getMessage());
@@ -138,7 +138,7 @@ public class TestApplicationApiService {
     Resource res = new Resource();
     app.setResource(res);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX + "application with no memory");
     } catch (IllegalArgumentException e) {
       Assert.assertEquals(ERROR_RESOURCE_MEMORY_INVALID, e.getMessage());
@@ -149,7 +149,7 @@ public class TestApplicationApiService {
     res.setMemory("100mb");
     res.setCpus(-2);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(
           EXCEPTION_PREFIX + "application with invalid no of cpups");
     } catch (IllegalArgumentException e) {
@@ -159,17 +159,17 @@ public class TestApplicationApiService {
     // number of containers not specified
     res.setCpus(2);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(
           EXCEPTION_PREFIX + "application with no container count");
     } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_CONTAINERS_COUNT_INVALID, e.getMessage());
+      Assert.assertTrue(e.getMessage().contains(ERROR_CONTAINERS_COUNT_INVALID));
     }
 
     // specifying profile along with cpus/memory raises exception
     res.setProfile("hbase_finance_large");
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX
           + "application with resource profile along with cpus/memory");
     } catch (IllegalArgumentException e) {
@@ -182,7 +182,7 @@ public class TestApplicationApiService {
     res.setCpus(null);
     res.setMemory(null);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
       Assert.fail(EXCEPTION_PREFIX
           + "application with resource profile only - NOT SUPPORTED");
     } catch (IllegalArgumentException e) {
@@ -198,7 +198,7 @@ public class TestApplicationApiService {
     // everything valid here
     app.setNumberOfContainers(5l);
     try {
-      ServiceApiUtil.validateApplicationPostPayload(app);
+      ServiceApiUtil.validateApplicationPayload(app, null);
     } catch (IllegalArgumentException e) {
       logger.error("application attributes specified should be valid here", e);
       Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
new file mode 100644
index 0000000..5f76f19
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import static org.apache.slider.util.ServiceApiUtil.$;
+
+/**
+ * This class defines constants that can be used in input spec for
+ * variable substitutions
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface ServiceApiConstants {
+
+  // Constants for service
+  String SERVICE_NAME = $("SERVICE_NAME");
+
+  String SERVICE_NAME_LC = $("SERVICE_NAME.lc");
+
+  // Constants for component
+  String COMPONENT_NAME = $("COMPONENT_NAME");
+
+  String COMPONENT_NAME_LC = $("COMPONENT_NAME.lc");
+
+  String COMPONENT_INSTANCE_NAME = $("COMPONENT_INSTANCE_NAME");
+
+  // Constants for component instance
+  String COMPONENT_ID = $("COMPONENT_ID");
+
+  String CONTAINER_ID = $("CONTAINER_ID");
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
index cdc96b8..b4040b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
@@ -17,20 +17,19 @@
 
 package org.apache.slider.api.resource;
 
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
 import java.io.Serializable;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonValue;
-
 /**
  * A config file that needs to be created and made available as a volume in an
  * application component container.
@@ -45,7 +44,7 @@ public class ConfigFile implements Serializable {
 
   public enum TypeEnum {
     XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-        "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML");
+        "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML"),;
 
     private String value;
 
@@ -63,7 +62,18 @@ public class ConfigFile implements Serializable {
   private TypeEnum type = null;
   private String destFile = null;
   private String srcFile = null;
-  private Map<String, String> props = null;
+  private Map<String, String> props = new HashMap<>();
+
+  public ConfigFile copy() {
+    ConfigFile copy = new ConfigFile();
+    copy.setType(this.getType());
+    copy.setSrcFile(this.getSrcFile());
+    copy.setDestFile(this.getDestFile());
+    if (this.getProps() != null && !this.getProps().isEmpty()) {
+      copy.getProps().putAll(this.getProps());
+    }
+    return copy;
+  }
 
   /**
    * Config file in the standard format like xml, properties, json, yaml,
@@ -105,19 +115,20 @@ public class ConfigFile implements Serializable {
   }
 
   /**
-   * TODO this probably is not required for non-template configs. It is now used as symlink for localization for non-template configs - we could infer the name from destFile instead
-   *
-   * Required for type template. This provides the source location of the
-   * template which needs to be mounted as dest_file post property
-   * substitutions. Typically the src_file would point to a source controlled
-   * network accessible file maintained by tools like puppet, chef, etc.
+   * This provides the source location of the configuration file, the content
+   * of which is dumped to dest_file post property substitutions, in the format
+   * as specified in type. Typically the src_file would point to a source
+   * controlled network accessible file maintained by tools like puppet, chef,
+   * or hdfs etc. Currently, only hdfs is supported.
    **/
   public ConfigFile srcFile(String srcFile) {
     this.srcFile = srcFile;
     return this;
   }
 
-  @ApiModelProperty(example = "null", value = "Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.")
+  @ApiModelProperty(example = "null", value = "This provides the source location of the configuration file, "
+      + "the content of which is dumped to dest_file post property substitutions, in the format as specified in type. "
+      + "Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.")
   @JsonProperty("src_file")
   public String getSrcFile() {
     return srcFile;
@@ -129,17 +140,19 @@ public class ConfigFile implements Serializable {
   }
 
   /**
-   * A blob of key value pairs that will be dumped in the dest_file in the
-   * format as specified in type. If the type is template then the attribute
-   * src_file is mandatory and the src_file content is dumped to dest_file post
-   * property substitutions.
+   A blob of key value pairs that will be dumped in the dest_file in the format
+   as specified in type. If src_file is specified, src_file content is dumped
+   in the dest_file and these properties will overwrite, if any, existing
+   properties in src_file or be added as new properties in src_file.
    **/
   public ConfigFile props(Map<String, String> props) {
     this.props = props;
     return this;
   }
 
-  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.")
+  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type."
+      + " If src_file is specified, src_file content is dumped in the dest_file and these properties will overwrite, if any,"
+      + " existing properties in src_file or be added as new properties in src_file.")
   @JsonProperty("props")
   public Map<String, String> getProps() {
     return props;
@@ -175,8 +188,7 @@ public class ConfigFile implements Serializable {
     ConfigFile configFile = (ConfigFile) o;
     return Objects.equals(this.type, configFile.type)
         && Objects.equals(this.destFile, configFile.destFile)
-        && Objects.equals(this.srcFile, configFile.srcFile)
-        && Objects.equals(this.props, configFile.props);
+        && Objects.equals(this.srcFile, configFile.srcFile);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
index 7b3b93e..0df586c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -17,8 +17,11 @@
 
 package org.apache.slider.api.resource;
 
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
+import org.apache.commons.lang.StringUtils;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -27,10 +30,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import org.apache.commons.lang.StringUtils;
-
 /**
  * Set of configuration properties that can be injected into the application
  * components via envs, files and custom pluggable helper docker containers.
@@ -156,6 +155,13 @@ public class Configuration implements Serializable {
     return properties.get(name.trim());
   }
 
+  public String getEnv(String name) {
+    if (name == null) {
+      return null;
+    }
+    return env.get(name.trim());
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -198,4 +204,29 @@ public class Configuration implements Serializable {
     }
     return o.toString().replace("\n", "\n    ");
   }
+
+  /**
+   * Merge all properties and envs from that configuration to this configuration.
+   * For ConfigFiles, all properties and envs of that ConfigFile are merged into
+   * this ConfigFile.
+   */
+  public synchronized void mergeFrom(Configuration that) {
+    this.properties.putAll(that.getProperties());
+    this.env.putAll(that.getEnv());
+    Map<String, ConfigFile> thatMap = new HashMap<>();
+    for (ConfigFile file : that.getFiles()) {
+      thatMap.put(file.getDestFile(), file.copy());
+    }
+    for (ConfigFile thisFile : files) {
+      if(thatMap.containsKey(thisFile.getDestFile())) {
+        ConfigFile thatFile = thatMap.get(thisFile.getDestFile());
+        thisFile.getProps().putAll(thatFile.getProps());
+        thatMap.remove(thisFile.getDestFile());
+      }
+    }
+    // add remaining new files from that Configuration
+    for (ConfigFile thatFile : thatMap.values()) {
+      files.add(thatFile.copy());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 2b0982f..7241374 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -653,7 +653,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   public ApplicationId actionCreate(Application application)
       throws IOException, YarnException {
-    ServiceApiUtil.validateApplicationPostPayload(application);
+    ServiceApiUtil.validateApplicationPayload(application,
+        sliderFileSystem.getFileSystem());
     String appName = application.getName();
     validateClusterName(appName);
     verifyNoLiveApp(appName, "Create");
@@ -692,7 +693,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     boolean hasSliderAMLog4j =
         addAMLog4jResource(appName, conf, localResources);
     // copy jars to hdfs and add to localResources
-    Path tempPath = addJarResource(appName, localResources);
+    addJarResource(appName, localResources);
     // add keytab if in secure env
     addKeytabResourceIfSecure(sliderFileSystem, localResources, conf, appName);
     printLocalResources(localResources);
@@ -700,7 +701,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     //TODO SliderAMClientProvider#copyEnvVars
     //TODO localResource putEnv
 
-    Map<String, String> env = addAMEnv(conf, tempPath);
+    Map<String, String> env = addAMEnv(conf);
 
     // create AM CLI
     String cmdStr =
@@ -805,7 +806,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return cmdStr;
   }
 
-  private Map<String, String> addAMEnv(Configuration conf, Path tempPath)
+  private Map<String, String> addAMEnv(Configuration conf)
       throws IOException {
     Map<String, String> env = new HashMap<>();
     ClasspathConstructor classpath =
@@ -819,6 +820,13 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     if (jaas != null) {
       env.put(HADOOP_JAAS_DEBUG, jaas);
     }
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      String userName = UserGroupInformation.getCurrentUser().getUserName();
+      log.info("Run as user " + userName);
+      // HADOOP_USER_NAME env is used by UserGroupInformation when log in
+      // This env makes AM run as this user
+      env.put("HADOOP_USER_NAME", userName);
+    }
     env.putAll(getAmLaunchEnv(conf));
     log.info("AM env: \n{}", stringifyMap(env));
     return env;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
index 9a4fa6c..968a90b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -240,7 +240,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String STDERR_AM = "slider-err.txt";
   String DEFAULT_GC_OPTS = "";
 
-  String HADOOP_USER_NAME = ApplicationConstants.Environment.USER.toString();
+  String HADOOP_USER_NAME = "HADOOP_USER_NAME";
   String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
   String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE";
 
@@ -306,7 +306,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String CERT_FILE_LOCALIZATION_PATH = INFRA_RUN_SECURITY_DIR + "ca.crt";
 
   String AM_CONFIG_GENERATION = "am.config.generation";
-  String APP_CONF_DIR = "app/conf";
+  String APP_CONF_DIR = "conf";
 
   String APP_RESOURCES = "application.resources";
   String APP_RESOURCES_DIR = "app/resources";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/CommandLineBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/CommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/CommandLineBuilder.java
index dbaa981..5ab0532 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/CommandLineBuilder.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/CommandLineBuilder.java
@@ -32,7 +32,6 @@ import java.util.List;
 public class CommandLineBuilder {
   protected final List<String> argumentList = new ArrayList<>(20);
 
-
   /**
    * Add an entry to the command list
    * @param args arguments -these will be converted strings
@@ -44,15 +43,6 @@ public class CommandLineBuilder {
   }
 
   /**
-   * Get the value at an offset
-   * @param offset offset
-   * @return the value at that point
-   */
-  public String elt(int offset) {
-    return argumentList.get(offset);
-  }
-
-  /**
    * Get the number of arguments
    * @return an integer >= 0
    */
@@ -96,9 +86,4 @@ public class CommandLineBuilder {
   public String build() {
     return SliderUtils.join(argumentList, " ");
   }
-
-  public List<String> getArgumentList() {
-    return argumentList;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java
deleted file mode 100644
index 88bac77..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.registry.docstore;
-
-public class ConfigurationResolver {
-  
-  
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index cb39368..9cc48e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -20,6 +20,13 @@ package org.apache.slider.providers;
 
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Component;
+import org.apache.slider.server.appmaster.state.AppState;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Provider role and key for use in app requests.
@@ -36,7 +43,9 @@ public final class ProviderRole {
   public final long placementTimeoutSeconds;
   public final String labelExpression;
   public final Component component;
-
+  public AtomicLong componentIdCounter = null;
+  public AppState appState;
+  public Queue<String> failedInstanceName = new ConcurrentLinkedQueue<String>();
   public ProviderRole(String name, int id) {
     this(name,
         id,
@@ -69,7 +78,7 @@ public final class ProviderRole {
         nodeFailureThreshold,
         placementTimeoutSeconds,
         labelExpression,
-        new Component().name(name).numberOfContainers(0L));
+        new Component().name(name).numberOfContainers(0L), null);
   }
 
   /**
@@ -79,18 +88,13 @@ public final class ProviderRole {
    * @param id ID. This becomes the YARN priority
    * @param policy placement policy
    * @param nodeFailureThreshold threshold for node failures (within a reset interval)
-   * after which a node failure is considered an app failure
+   * after which a node failure is considered an app failure
    * @param placementTimeoutSeconds for lax placement, timeout in seconds before
    * @param labelExpression label expression for requests; may be null
    */
-  public ProviderRole(String name,
-      String group,
-      int id,
-      int policy,
-      int nodeFailureThreshold,
-      long placementTimeoutSeconds,
-      String labelExpression,
-      Component component) {
+  public ProviderRole(String name, String group, int id, int policy,
+      int nodeFailureThreshold, long placementTimeoutSeconds,
+      String labelExpression, Component component, AppState state) {
     this.name = name;
     if (group == null) {
       this.group = name;
@@ -103,9 +107,13 @@ public final class ProviderRole {
     this.placementTimeoutSeconds = placementTimeoutSeconds;
     this.labelExpression = labelExpression;
     this.component = component;
-
+    if(component.getUniqueComponentSupport()) {
+      componentIdCounter = new AtomicLong(0);
+    }
+    this.appState = state;
   }
 
+
   @Override
   public boolean equals(Object o) {
     if (this == o) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
index c31b2ac..7e92bfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
@@ -26,6 +26,7 @@ import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 
@@ -38,7 +39,8 @@ public interface ProviderService extends Service {
    */
   void buildContainerLaunchContext(ContainerLauncher containerLauncher,
       Application application, Container container, ProviderRole providerRole,
-      SliderFileSystem sliderFileSystem) throws IOException, SliderException;
+      SliderFileSystem sliderFileSystem, RoleInstance roleInstance)
+      throws IOException, SliderException;
 
 
   void setAMState(StateAccessForProviders stateAccessForProviders);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index f8ec976..d384585 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -20,10 +20,10 @@ package org.apache.slider.providers;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
@@ -32,7 +32,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.slider.api.ClusterNode;
-import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.RoleKeys;
@@ -59,6 +58,7 @@ import org.slf4j.Logger;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -66,8 +66,12 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
 import java.util.regex.Pattern;
 
+import static org.apache.slider.api.ServiceApiConstants.*;
+import static org.apache.slider.util.ServiceApiUtil.$;
+
 /**
  * This is a factoring out of methods handy for providers. It's bonded to a log
  * at construction time.
@@ -89,7 +93,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
    * Add oneself to the classpath. This does not work
    * on minicluster test runs where the JAR is not built up.
    * @param providerResources map of provider resources to add these entries to
-   * @param provider provider to add
+   * @param providerClass provider to add
    * @param jarName name of the jar to use
    * @param sliderFileSystem target filesystem
    * @param tempPath path in the cluster FS for temp files
@@ -157,14 +161,19 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
         libDir, libLocalSrcDir);
   }
 
-  // Build key -> value map
-  // value will be substituted by corresponding data in tokenMap
-  public Map<String, String> substituteConfigs(Map<String, String> configs,
+  public static String substituteStrWithTokens(String content,
+      Map<String, String> tokensForSubstitution) {
+    for (Map.Entry<String, String> token : tokensForSubstitution.entrySet()) {
+      content =
+          content.replaceAll(Pattern.quote(token.getKey()), token.getValue());
+    }
+    return content;
+  }
+
+  // configs will be substituted by corresponding env in tokenMap
+  public void substituteMapWithTokens(Map<String, String> configs,
       Map<String, String> tokenMap) {
-    String format = "${%s}";
-    Map<String, String> filteredOptions = new HashMap<>();
     for (Map.Entry<String, String> entry : configs.entrySet()) {
-      String key = entry.getKey();
       String value = entry.getValue();
       if (tokenMap != null) {
         for (Map.Entry<String, String> token : tokenMap.entrySet()) {
@@ -172,10 +181,8 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
               value.replaceAll(Pattern.quote(token.getKey()), token.getValue());
         }
       }
-      filteredOptions.put(String.format(format, key), value);
+      entry.setValue(value);
     }
-
-    return filteredOptions;
   }
 
   /**
@@ -249,78 +256,95 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     }
   }
 
+  public static void addEnvForSubstitution(Map<String, String> env,
+      Map<String, String> tokensForSubstitution) {
+    if (env == null || env.isEmpty() || tokensForSubstitution == null
+        || tokensForSubstitution.isEmpty()) {
+      return;
+    }
+    for (Map.Entry<String, String> entry : env.entrySet()) {
+      tokensForSubstitution.put($(entry.getKey()), entry.getValue());
+    }
+  }
 
   // 1. Create all config files for a component on hdfs for localization
   // 2. Add the config file to localResource
-  //TODO handle Template format config file
-  public void createConfigFileAndAddLocalResource(ContainerLauncher launcher,
-      SliderFileSystem fs, Component component,
-      Map<String, String> tokensForSubstitution,
-      StateAccessForProviders amState) throws IOException {
+  public synchronized void createConfigFileAndAddLocalResource(
+      ContainerLauncher launcher, SliderFileSystem fs, Component component,
+      Map<String, String> tokensForSubstitution, RoleInstance roleInstance)
+      throws IOException {
     Path compDir =
         new Path(new Path(fs.getAppDir(), "components"), component.getName());
-    if (!fs.getFileSystem().exists(compDir)) {
-      fs.getFileSystem().mkdirs(compDir,
+    Path compInstanceDir =
+        new Path(compDir, roleInstance.getCompInstanceName());
+    if (!fs.getFileSystem().exists(compInstanceDir)) {
+      fs.getFileSystem().mkdirs(compInstanceDir,
           new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
-      log.info("Creating component dir: " + compDir);
+      roleInstance.compInstanceDir = compInstanceDir;
+      log.info("Creating component instance dir: " + compInstanceDir);
     } else {
-      log.info("Component conf dir already exists: " + compDir);
-      return;
+      log.info("Component instance conf dir already exists: " + compInstanceDir);
     }
 
-    for (ConfigFile configFile : component.getConfiguration().getFiles()) {
-      String fileName = configFile.getSrcFile();
+    // add Configuration#env into tokens substitution
+    addEnvForSubstitution(component.getConfiguration().getEnv(),
+        tokensForSubstitution);
+
+    log.info("Tokens substitution for component: " + roleInstance
+        .getCompInstanceName() + System.lineSeparator()
+        + tokensForSubstitution);
+
+    for (ConfigFile originalFile : component.getConfiguration().getFiles()) {
+      ConfigFile configFile = originalFile.copy();
+      String fileName = new Path(configFile.getDestFile()).getName();
+
       // substitute file name
       for (Map.Entry<String, String> token : tokensForSubstitution.entrySet()) {
         configFile.setDestFile(configFile.getDestFile()
             .replaceAll(Pattern.quote(token.getKey()), token.getValue()));
       }
-      // substitute configs
-      substituteConfigs(configFile.getProps(), tokensForSubstitution);
-
-      // write configs onto hdfs
-      PublishedConfiguration publishedConfiguration =
-          new PublishedConfiguration(fileName,
-              configFile.getProps().entrySet());
-      Path remoteFile = new Path(compDir, fileName);
+
+      Path remoteFile = new Path(compInstanceDir, fileName);
       if (!fs.getFileSystem().exists(remoteFile)) {
-        synchronized (this) {
-          if (!fs.getFileSystem().exists(remoteFile)) {
-            PublishedConfigurationOutputter configurationOutputter =
-                PublishedConfigurationOutputter.createOutputter(
-                    ConfigFormat.resolve(configFile.getType().toString()),
-                    publishedConfiguration);
-            FSDataOutputStream os = null;
-            try {
-              os = fs.getFileSystem().create(remoteFile);
-              configurationOutputter.save(os);
-              os.flush();
-              log.info("Created config file on hdfs: " + remoteFile);
-            } finally {
-              IOUtils.closeStream(os);
-            }
+        log.info("Saving config file on hdfs for component " + roleInstance
+            .getCompInstanceName() + ": " + configFile);
+
+        if (configFile.getSrcFile() != null) {
+          // Load config file template
+          switch (configFile.getType()) {
+          case HADOOP_XML:
+            // Hadoop_xml_template
+            resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(),
+                tokensForSubstitution, configFile, remoteFile, roleInstance);
+            break;
+          case TEMPLATE:
+            // plain-template
+            resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(),
+                tokensForSubstitution, configFile, remoteFile, roleInstance);
+            break;
+          default:
+            log.info("Not supporting loading src_file for " + configFile);
+            break;
           }
+        } else {
+          // non-template
+          resolveNonTemplateConfigsAndSaveOnHdfs(fs, tokensForSubstitution,
+              roleInstance, configFile, fileName, remoteFile);
         }
       }
 
-      // Publish configs
-      amState.getPublishedSliderConfigurations()
-          .put(configFile.getSrcFile(), publishedConfiguration);
-
       // Add resource for localization
       LocalResource configResource =
           fs.createAmResource(remoteFile, LocalResourceType.FILE);
       File destFile = new File(configFile.getDestFile());
-      //TODO why to we need to differetiate  RESOURCE_DIR vs APP_CONF_DIR
+      String symlink = APP_CONF_DIR + "/" + fileName;
       if (destFile.isAbsolute()) {
-        String symlink = RESOURCE_DIR + "/" + fileName;
         launcher.addLocalResource(symlink, configResource,
             configFile.getDestFile());
         log.info("Add config file for localization: " + symlink + " -> "
             + configResource.getResource().getFile() + ", dest mount path: "
             + configFile.getDestFile());
       } else {
-        String symlink = APP_CONF_DIR + "/" + fileName;
         launcher.addLocalResource(symlink, configResource);
         log.info("Add config file for localization: " + symlink + " -> "
             + configResource.getResource().getFile());
@@ -328,23 +352,110 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     }
   }
 
+  private void resolveNonTemplateConfigsAndSaveOnHdfs(SliderFileSystem fs,
+      Map<String, String> tokensForSubstitution, RoleInstance roleInstance,
+      ConfigFile configFile, String fileName, Path remoteFile)
+      throws IOException {
+    // substitute non-template configs
+    substituteMapWithTokens(configFile.getProps(), tokensForSubstitution);
+
+    // write configs onto hdfs
+    PublishedConfiguration publishedConfiguration =
+        new PublishedConfiguration(fileName,
+            configFile.getProps().entrySet());
+    if (!fs.getFileSystem().exists(remoteFile)) {
+      PublishedConfigurationOutputter configurationOutputter =
+          PublishedConfigurationOutputter.createOutputter(
+              ConfigFormat.resolve(configFile.getType().toString()),
+              publishedConfiguration);
+      try (FSDataOutputStream os = fs.getFileSystem().create(remoteFile)) {
+        configurationOutputter.save(os);
+        os.flush();
+      }
+    } else {
+      log.info("Component instance = " + roleInstance.getCompInstanceName()
+              + ", config file already exists: " + remoteFile);
+    }
+  }
+
+  // 1. substitute config template - only handle hadoop_xml format
+  // 2. save on hdfs
+  @SuppressWarnings("unchecked")
+  private void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs,
+      Map<String, String> tokensForSubstitution, ConfigFile configFile,
+      Path remoteFile, RoleInstance roleInstance) throws IOException {
+    Map<String, String> conf;
+    try {
+      conf = (Map<String, String>) roleInstance.providerRole.
+          appState.configFileCache.get(configFile);
+    } catch (ExecutionException e) {
+      log.info("Failed to load config file: " + configFile, e);
+      return;
+    }
+    // make a copy for substitution
+    org.apache.hadoop.conf.Configuration confCopy =
+        new org.apache.hadoop.conf.Configuration(false);
+    for (Map.Entry<String, String> entry : conf.entrySet()) {
+      confCopy.set(entry.getKey(), entry.getValue());
+    }
+    // substitute properties
+    for (Map.Entry<String, String> entry : configFile.getProps().entrySet()) {
+      confCopy.set(entry.getKey(), entry.getValue());
+    }
+    // substitute env variables
+    for (Map.Entry<String, String> entry : confCopy) {
+      String val = entry.getValue();
+      if (val != null) {
+        for (Map.Entry<String, String> token : tokensForSubstitution
+            .entrySet()) {
+          val = val.replaceAll(Pattern.quote(token.getKey()), token.getValue());
+          confCopy.set(entry.getKey(), val);
+        }
+      }
+    }
+    // save on hdfs
+    try (OutputStream output = fs.create(remoteFile)) {
+      confCopy.writeXml(output);
+      log.info("Reading config from: " + configFile.getSrcFile()
+          + ", writing to: " + remoteFile);
+    }
+  }
+
+  // 1) read the template as a string
+  // 2) do token substitution
+  // 3) save on hdfs
+  private void resolvePlainTemplateAndSaveOnHdfs(FileSystem fs,
+      Map<String, String> tokensForSubstitution, ConfigFile configFile,
+      Path remoteFile, RoleInstance roleInstance) {
+    String content;
+    try {
+      content = (String) roleInstance.providerRole.appState.configFileCache
+          .get(configFile);
+    } catch (ExecutionException e) {
+      log.info("Failed to load config file: " + configFile, e);
+      return;
+    }
+    // substitute tokens
+    content = substituteStrWithTokens(content, tokensForSubstitution);
+
+    try (OutputStream output = fs.create(remoteFile)) {
+      org.apache.commons.io.IOUtils.write(content, output);
+    } catch (IOException e) {
+      log.info("Failed to create " + remoteFile);
+    }
+  }
+
   /**
    * Get initial token map to be substituted into config values.
    * @param appConf app configurations
-   * @param componentName component name
-   * @param componentGroup component group
-   * @param containerId container ID
    * @param clusterName app name
    * @return tokens to replace
    */
-  public Map<String, String> getStandardTokenMap(
-      Configuration appConf, Configuration componentConf, String componentName,
-      String componentGroup, String containerId, String clusterName) {
+  public Map<String, String> getStandardTokenMap(Configuration appConf,
+      RoleInstance roleInstance, String clusterName) {
 
     Map<String, String> tokens = new HashMap<>();
-    if (containerId != null) {
-      tokens.put("${CONTAINER_ID}", containerId);
-    }
+
     String nnuri = appConf.getProperty("fs.defaultFS");
     if (nnuri != null && !nnuri.isEmpty()) {
       tokens.put("${NN_URI}", nnuri);
@@ -352,34 +463,13 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     }
     tokens.put("${ZK_HOST}", appConf.getProperty(OptionKeys.ZOOKEEPER_HOSTS));
     tokens.put("${DEFAULT_ZK_PATH}", appConf.getProperty(OptionKeys.ZOOKEEPER_PATH));
-    String prefix = componentConf.getProperty(ROLE_PREFIX);
-    String dataDirSuffix = "";
-    if (prefix == null) {
-      prefix = "";
-    } else {
-      dataDirSuffix = "_" + SliderUtils.trimPrefix(prefix);
-    }
-    tokens.put("${DEFAULT_DATA_DIR}",
-        appConf.getProperty(InternalKeys.INTERNAL_DATA_DIR_PATH)
-            + dataDirSuffix);
-    tokens.put("${JAVA_HOME}", appConf.getProperty(JAVA_HOME));
-    tokens.put("${COMPONENT_NAME}", componentName);
-    tokens.put("${COMPONENT_NAME.lc}", componentName.toLowerCase());
-    tokens.put("${COMPONENT_PREFIX}", prefix);
-    tokens.put("${COMPONENT_PREFIX.lc}", prefix.toLowerCase());
-    if (!componentName.equals(componentGroup) &&
-        componentName.startsWith(componentGroup)) {
-      tokens.put("${COMPONENT_ID}",
-          componentName.substring(componentGroup.length()));
-    }
-    if (clusterName != null) {
-      tokens.put("${CLUSTER_NAME}", clusterName);
-      tokens.put("${CLUSTER_NAME.lc}", clusterName.toLowerCase());
-      tokens.put("${APP_NAME}", clusterName);
-      tokens.put("${APP_NAME.lc}", clusterName.toLowerCase());
-    }
-    tokens.put("${APP_COMPONENT_NAME}", componentName);
-    tokens.put("${APP_COMPONENT_NAME.lc}", componentName.toLowerCase());
+    tokens.put(SERVICE_NAME_LC, clusterName.toLowerCase());
+    tokens.put(SERVICE_NAME, clusterName);
+    tokens.put(COMPONENT_NAME, roleInstance.role);
+    tokens.put(COMPONENT_NAME_LC, roleInstance.role.toLowerCase());
+    tokens.put(COMPONENT_INSTANCE_NAME, roleInstance.getCompInstanceName());
+    tokens.put(CONTAINER_ID, roleInstance.getContainerId().toString());
+    tokens.put(COMPONENT_ID, String.valueOf(roleInstance.componentId));
     return tokens;
   }
 
@@ -388,7 +478,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
    * @param tokens existing tokens
    * @param amState access to AM state
    */
-  public void addRoleHostTokens(Map<String, String> tokens,
+  public void addComponentHostTokens(Map<String, String> tokens,
       StateAccessForProviders amState) {
     if (amState == null) {
       return;
@@ -398,7 +488,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
       String tokenName = entry.getKey().toUpperCase(Locale.ENGLISH) + "_HOST";
       String hosts = StringUtils .join(",",
           getHostsList(entry.getValue().values(), true));
-      tokens.put("${" + tokenName + "}", hosts);
+      tokens.put($(tokenName), hosts);
     }
   }
 
@@ -443,7 +533,8 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
         // create and publish updated service record (including hostname & ip)
         ServiceRecord record = new ServiceRecord();
         record.set(YarnRegistryAttributes.YARN_ID, containerId);
-        record.description = roleName.replaceAll("_", "-");
+        String componentInstanceName = role.getCompInstanceName();
+        record.description = componentInstanceName.replaceAll("_", "-");
         record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
             PersistencePolicies.CONTAINER);
         // TODO: use constants from YarnRegistryAttributes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index 511f7bc..93a481c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -46,6 +46,8 @@ import java.io.IOException;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.slider.util.ServiceApiUtil.$;
+
 public class DockerProviderService extends AbstractService
     implements ProviderService, DockerKeys, SliderKeys {
 
@@ -70,14 +72,11 @@ public class DockerProviderService extends AbstractService
     this.yarnRegistry = yarnRegistry;
   }
 
+
   public void buildContainerLaunchContext(ContainerLauncher launcher,
       Application application, Container container, ProviderRole providerRole,
-      SliderFileSystem fileSystem)
+      SliderFileSystem fileSystem, RoleInstance roleInstance)
       throws IOException, SliderException {
-
-    String roleName = providerRole.name;
-    String roleGroup = providerRole.group;
-
     Component component = providerRole.component;
     launcher.setYarnDockerMode(true);
     launcher.setDockerImage(component.getArtifact().getId());
@@ -86,16 +85,12 @@ public class DockerProviderService extends AbstractService
     launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer());
 
     // Generate tokens (key-value pair) for config substitution.
-    Map<String, String> standardTokens = providerUtils
-        .getStandardTokenMap(application.getConfiguration(),
-            component.getConfiguration(), roleName, roleGroup,
-            container.getId().toString(), application.getName());
-    Map<String, String> tokensForSubstitution = providerUtils.substituteConfigs(
-            component.getConfiguration().getProperties(), standardTokens);
-
-    tokensForSubstitution.putAll(standardTokens);
+    // Get pre-defined tokens
+    Map<String, String> tokensForSubstitution = providerUtils
+        .getStandardTokenMap(application.getConfiguration(), roleInstance,
+            application.getName());
 
-    // Set the environment variables
+    // Set the environment variables in launcher
     launcher.putEnv(SliderUtils
         .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
     launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
@@ -108,33 +103,26 @@ public class DockerProviderService extends AbstractService
     launcher.setEnv("LANGUAGE", "en_US.UTF-8");
 
     for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
-      tokensForSubstitution.put("${" + entry.getKey() + "}", entry.getValue());
-    }
-
-    providerUtils.addRoleHostTokens(tokensForSubstitution, amState);
-
-    log.info("Token for substitution: " + tokensForSubstitution);
-
-    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
-      //TODO localize key tabs, WHY is this code needed ? WHY DOES CONTAINER REQUIRE AM KEYTAB??
-      providerUtils.localizeServiceKeytabs(launcher, fileSystem, application);
+      tokensForSubstitution.put($(entry.getKey()), entry.getValue());
     }
+    providerUtils.addComponentHostTokens(tokensForSubstitution, amState);
 
     // create config file on hdfs and add local resource
     providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
-        component, tokensForSubstitution, amState);
+        component, tokensForSubstitution, roleInstance);
 
+    // substitute launch command
+    String launchCommand = ProviderUtils
+        .substituteStrWithTokens(component.getLaunchCommand(),
+            tokensForSubstitution);
     CommandLineBuilder operation = new CommandLineBuilder();
-    operation.add(component.getLaunchCommand());
-    operation.add("> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
-        + OUT_FILE + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
-        + ERR_FILE);
+    operation.add(launchCommand);
+    operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
     launcher.addCommand(operation.build());
 
     // publish exports
-    // TODO move this to app level, no need to do this for every container launch
     providerUtils
-        .substituteConfigs(application.getQuicklinks(), tokensForSubstitution);
+        .substituteMapWithTokens(application.getQuicklinks(), tokensForSubstitution);
     PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
         application.getQuicklinks().entrySet());
     amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index 70eab71..c53349f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -150,24 +150,26 @@ public class RoleLaunchService
         containerLauncher.setupUGI();
         containerLauncher.putEnv(envVars);
 
-        log.info("Launching container {} into RoleName = {}, RoleGroup = {}",
-            container.getId(), role.name, role.group);
-
-        provider.buildContainerLaunchContext(containerLauncher, application,
-            container, role, fs);
-
-        RoleInstance instance = new RoleInstance(container);
+        String failedInstance = role.failedInstanceName.poll();
+        RoleInstance instance;
+        if (failedInstance != null) {
+          instance = new RoleInstance(container, role, failedInstance);
+        } else {
+          instance = new RoleInstance(container, role);
+        }
         String[] envDescription = containerLauncher.dumpEnvToString();
-
         String commandsAsString = containerLauncher.getCommandsAsString();
+        log.info("Launching container {} for component instance = {}",
+            container.getId(), instance.getCompInstanceName());
         log.info("Starting container with command: {}", commandsAsString);
-
-        instance.providerRole = role;
         instance.command = commandsAsString;
         instance.role = role.name;
-        instance.group = role.group;
         instance.roleId = role.id;
         instance.environment = envDescription;
+
+        provider.buildContainerLaunchContext(containerLauncher, application,
+            container, role, fs, instance);
+
         long delay = role.component.getConfiguration()
             .getPropertyLong(AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
         long maxDelay = getConfig()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 7473dab..1f379ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -25,6 +25,7 @@ import com.google.protobuf.BlockingService;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -86,6 +87,7 @@ import org.apache.slider.api.RoleKeys;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.proto.SliderClusterAPI;
 import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.params.AbstractActionArgs;
@@ -109,7 +111,6 @@ import org.apache.slider.core.main.ServiceLauncher;
 import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.core.registry.info.CustomRegistryConstants;
 import org.apache.slider.providers.ProviderCompleted;
-import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.SliderProviderFactory;
 import org.apache.slider.server.appmaster.actions.ActionHalt;
@@ -136,7 +137,6 @@ import org.apache.slider.server.appmaster.operations.RMOperationHandler;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
 import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPBImpl;
 import org.apache.slider.server.appmaster.rpc.SliderIPCService;
-import org.apache.slider.server.appmaster.security.SecurityConfiguration;
 import org.apache.slider.server.appmaster.state.AppState;
 import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
 import org.apache.slider.server.appmaster.state.ContainerAssignment;
@@ -170,7 +170,6 @@ import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -701,10 +700,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       registryOperations = startRegistryOperationsService();
       log.info(registryOperations.toString());
 
-      //build the role map
-      List<ProviderRole> providerRoles = Collections.EMPTY_LIST;
       // Start up the WebApp and track the URL for it
-
       // Web service endpoints: initialize
       WebAppApiImpl webAppApi =
           new WebAppApiImpl(
@@ -815,7 +811,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       //build the instance
       AppStateBindingInfo binding = new AppStateBindingInfo();
       binding.serviceConfig = serviceConf;
-      binding.roles = providerRoles;
       binding.fs = fs.getFileSystem();
       binding.historyPath = historyDir;
       binding.liveContainers = liveContainers;
@@ -873,6 +868,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     scheduleFailureWindowResets(application.getConfiguration());
     scheduleEscalation(application.getConfiguration());
 
+    for (Component component : application.getComponents()) {
+      // Merge app-level configuration into component level configuration
+      component.getConfiguration().mergeFrom(application.getConfiguration());
+    }
+
     try {
       // schedule YARN Registry registration
       queue(new ActionRegisterServiceInstance(appName, appid, application));
@@ -1170,22 +1170,22 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * Handler for {@link RegisterComponentInstance action}
    * Register/re-register an ephemeral container that is already in the application state
    * @param id the component
-   * @param description component description
-   * @param type component type
    * @return true if the component is registered
    */
-  public boolean registerComponent(ContainerId id, String description,
-      String type) throws IOException {
+  public boolean registerComponent(ContainerId id, RoleInstance roleInstance)
+      throws IOException {
     RoleInstance instance = appState.getOwnedContainer(id);
     if (instance == null) {
       return false;
     }
     // this is where component registrations  go
-    log.info("Registering component {}", id);
     String cid = RegistryPathUtils.encodeYarnID(id.toString());
     ServiceRecord record = new ServiceRecord();
     record.set(YarnRegistryAttributes.YARN_ID, cid);
-    record.description = description;
+
+    record.description = roleInstance.getCompInstanceName();
+    log.info("Registering component " + roleInstance.getCompInstanceName()
+        + ", containerId = " + id);
     record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
         PersistencePolicies.CONTAINER);
     setUserProvidedServiceRecordAttributes(
@@ -1194,7 +1194,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       yarnRegistryOperations.putComponent(cid, record);
     } catch (IOException e) {
       log.warn("Failed to register container {}/{}: {}",
-          id, description, e, e);
+          id, roleInstance.role, e, e);
       return false;
     }
     org.apache.slider.api.resource.Container container =
@@ -1203,6 +1203,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     container.setLaunchTime(new Date());
     container.setState(org.apache.slider.api.resource.ContainerState.INIT);
     container.setBareHost(instance.host);
+    // TODO differentiate component name and component instance name ?
+    container.setComponentName(roleInstance.getCompInstanceName());
     instance.providerRole.component.addContainer(container);
 
     if (timelineServiceEnabled) {
@@ -1228,20 +1230,38 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * 
    * unregister a component. At the time this message is received,
    * the component may not have been registered
-   * @param id the component
    */
-  public void unregisterComponent(ContainerId id) {
-    log.info("Unregistering component {}", id);
+  public void unregisterComponent(RoleInstance roleInstance) {
+    ContainerId containerId = roleInstance.getContainerId();
+    log.info(
+        "Unregistering component instance " + roleInstance.getCompInstanceName()
+            + ", ContainerId = " + containerId);
     if (yarnRegistryOperations == null) {
-      log.warn("Processing unregister component event before initialization " +
-               "completed; init flag ={}", initCompleted);
+      log.warn("Processing unregister component event before initialization "
+          + "completed; init flag ={}", initCompleted);
       return;
     }
-    String cid = RegistryPathUtils.encodeYarnID(id.toString());
+    String cid = RegistryPathUtils.encodeYarnID(containerId.toString());
     try {
       yarnRegistryOperations.deleteComponent(cid);
     } catch (IOException e) {
-      log.warn("Failed to delete container {} : {}", id, e, e);
+      log.warn("Failed to delete container {} : {}", containerId, e, e);
+    }
+
+    // remove component instance dir
+    try {
+      FileSystem fs = getClusterFS().getFileSystem();
+      if (roleInstance.compInstanceDir != null && fs
+          .exists(roleInstance.compInstanceDir)) {
+        boolean deleted = fs.delete(roleInstance.compInstanceDir, true);
+        if (!deleted) {
+          log.warn("Failed to delete component instance dir: "
+              + roleInstance.compInstanceDir);
+        }
+      }
+    } catch (IOException e) {
+      log.error("Failed to delete component instance dir: "
+          + roleInstance.compInstanceDir, e);
     }
   }
 
@@ -1395,13 +1415,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     return exitCode;
   }
 
-    /**
-     * Get diagnostics info about containers
-     */
-  private String getContainerDiagnosticInfo() {
-
-    return appState.getContainerDiagnosticInfo();
-  }
 
   public Object getProxy(Class protocol, InetSocketAddress addr) {
     return yarnRPC.getProxy(protocol, addr, getConfig());
@@ -1492,7 +1505,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     //for all the operations, exec them
     execute(operations);
-    log.info("Diagnostics: {}", getContainerDiagnosticInfo());
   }
 
   @Override //AMRMClientAsync
@@ -1519,8 +1531,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
       //  known nodes trigger notifications
       if(!result.unknownNode) {
-        queue(new UnregisterComponentInstance(containerId, 0,
-            TimeUnit.MILLISECONDS));
+        queue(new UnregisterComponentInstance(0,
+            TimeUnit.MILLISECONDS,  result.roleInstance));
+
         if (timelineServiceEnabled && result.roleInstance != null) {
           serviceTimelinePublisher
               .componentInstanceFinished(result.roleInstance);
@@ -1936,7 +1949,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       nmClientAsync.getContainerStatusAsync(containerId,
                                             cinfo.container.getNodeId());
       // push out a registration
-      queue(new RegisterComponentInstance(containerId, cinfo.role, cinfo.group,
+      queue(new RegisterComponentInstance(containerId, cinfo,
           0, TimeUnit.MILLISECONDS));
       
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java
index 4cf4981..3c1bed8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
 import java.util.concurrent.TimeUnit;
 
@@ -33,18 +34,15 @@ import java.util.concurrent.TimeUnit;
 public class RegisterComponentInstance extends AsyncAction {
 
   public final ContainerId containerId;
-  public final String description;
-  public final String type;
+  public final RoleInstance roleInstance;
 
   public RegisterComponentInstance(ContainerId containerId,
-      String description,
-      String type,
+      RoleInstance roleInstance,
       long delay,
       TimeUnit timeUnit) {
     super("RegisterComponentInstance :" + containerId,
         delay, timeUnit);
-    this.description = description;
-    this.type = type;
+    this.roleInstance = roleInstance;
     Preconditions.checkArgument(containerId != null);
     this.containerId = containerId;
   }
@@ -54,6 +52,6 @@ public class RegisterComponentInstance extends AsyncAction {
       QueueAccess queueService,
       AppState appState) throws Exception {
 
-    appMaster.registerComponent(containerId, description, type);
+    appMaster.registerComponent(containerId, roleInstance);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/UnregisterComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/UnregisterComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/UnregisterComponentInstance.java
index 575fe8f..ac86333 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/UnregisterComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/UnregisterComponentInstance.java
@@ -21,31 +21,31 @@ package org.apache.slider.server.appmaster.actions;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
 import java.util.concurrent.TimeUnit;
 
 /**
  * Tell AM to unregister this component instance
- * {@link SliderAppMaster#unregisterComponent(ContainerId)}
  */
 public class UnregisterComponentInstance extends AsyncAction {
   
 
-  public final ContainerId containerId;
+  public final RoleInstance roleInstance;
 
-  public UnregisterComponentInstance(ContainerId containerId,
-      long delay,
-      TimeUnit timeUnit) {
-    super("UnregisterComponentInstance :" + containerId.toString(),
+  public UnregisterComponentInstance(long delay, TimeUnit timeUnit,
+      RoleInstance roleInstance) {
+    super("UnregisterComponentInstance :" + roleInstance.getCompInstanceName()
+            + ", ContainerId = " + roleInstance.getContainerId(),
         delay, timeUnit);
-    this.containerId = containerId;
+    this.roleInstance = roleInstance;
   }
 
   @Override
   public void execute(SliderAppMaster appMaster,
       QueueAccess queueService,
       AppState appState) throws Exception {
-    appMaster.unregisterComponent(containerId);
+    appMaster.unregisterComponent(roleInstance);
 
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
index 5dcbe9b..cf607a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
@@ -39,24 +39,34 @@ public class SliderMetrics implements MetricsSource {
 
   @Metric("containers requested")
   public MutableGaugeInt containersRequested;
+
   @Metric("anti-affinity containers pending")
   public MutableGaugeInt pendingAAContainers;
+
   @Metric("containers running")
   public MutableGaugeInt containersRunning;
+
   @Metric("containers desired")
   public MutableGaugeInt containersDesired;
+
   @Metric("containers completed")
   public MutableGaugeInt containersCompleted;
+
   @Metric("containers failed")
   public MutableGaugeInt containersFailed;
+
   @Metric("containers failed since last threshold")
   public MutableGaugeInt failedSinceLastThreshold;
+
   @Metric("containers preempted")
   public MutableGaugeInt containersPreempted;
+
   @Metric("containers exceeded limits")
   public MutableGaugeInt containersLimitsExceeded;
+
   @Metric("containers surplus")
   public MutableGaugeInt surplusContainers;
+
   @Metric("containers failed due to disk failure")
   public MutableGaugeInt containersDiskFailure;
 
@@ -80,5 +90,18 @@ public class SliderMetrics implements MetricsSource {
   public void tag(String name, String description, String value) {
     registry.tag(name, description, value);
   }
+
+  @Override public String toString() {
+    return "SliderMetrics{"
+        + "containersRequested=" + containersRequested.value()
+        + ", pendingAAContainers=" + pendingAAContainers.value()
+        + ", containersRunning=" + containersRunning.value()
+        + ", containersDesired=" + containersDesired.value()
+        + ", containersCompleted=" + containersCompleted.value()
+        + ", containersFailed=" + containersFailed.value()
+        + ", failedSinceLastThreshold=" + failedSinceLastThreshold.value()
+        + ", containersPreempted=" + containersPreempted.value()
+        + ", surplusContainers=" + surplusContainers.value() + '}';
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/monkey/ChaosKillContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/monkey/ChaosKillContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/monkey/ChaosKillContainer.java
index ae38e4c..1406fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/monkey/ChaosKillContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/monkey/ChaosKillContainer.java
@@ -77,7 +77,7 @@ public class ChaosKillContainer implements ChaosTarget {
       RoleInstance roleInstance = liveContainers.get(target);
       log.info("Killing {}", roleInstance);
 
-      queues.schedule(new ActionKillContainer(roleInstance.getId(),
+      queues.schedule(new ActionKillContainer(roleInstance.getContainerId(),
           DELAY, TimeUnit.MILLISECONDS, operationHandler));
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
index f88d586..22f9bc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
@@ -283,7 +283,7 @@ public class SliderIPCService extends AbstractService
     //throws NoSuchNodeException if it is missing
     RoleInstance instance =
         state.getLiveInstanceByContainerID(containerID);
-    queue(new ActionKillContainer(instance.getId(), 0, TimeUnit.MILLISECONDS,
+    queue(new ActionKillContainer(instance.getContainerId(), 0, TimeUnit.MILLISECONDS,
         amOperations));
     Messages.KillContainerResponseProto.Builder builder =
         Messages.KillContainerResponseProto.newBuilder();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/50] [abbrv] hadoop git commit: YARN-6446. addendum patch. Contributed by Rohith Sharma K S.

Posted by ji...@apache.org.
YARN-6446. addendum patch. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d83d95b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d83d95b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d83d95b

Branch: refs/heads/yarn-native-services
Commit: 6d83d95b69b3bc679cd9d10af6fc985db45a3c62
Parents: fbc0b97
Author: Sunil G <su...@apache.org>
Authored: Fri May 5 23:19:29 2017 +0530
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../server/appmaster/model/mock/MockProviderService.java      | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d83d95b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
index 4098cf7..a04a4b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
@@ -32,6 +32,7 @@ import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 
 import java.io.IOException;
@@ -139,4 +140,10 @@ public class MockProviderService implements ProviderService {
       ContainerStatus status) {
     return false;
   }
+
+  @Override
+  public void setServiceTimelinePublisher(
+      ServiceTimelinePublisher serviceTimelinePublisher) {
+
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/50] [abbrv] hadoop git commit: YARN-6716. Native services support for specifying component start order. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6716. Native services support for specifying component start order. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14b8371b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14b8371b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14b8371b

Branch: refs/heads/yarn-native-services
Commit: 14b8371b66685d75a87cfa37fb1c5d4422a956ef
Parents: 5560b31
Author: Jian He <ji...@apache.org>
Authored: Mon Jun 26 17:00:12 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  13 +-
 .../org/apache/slider/api/InternalKeys.java     |  11 +
 .../java/org/apache/slider/api/StateValues.java |  22 +-
 .../slider/api/resource/ReadinessCheck.java     |  45 +-
 .../apache/slider/providers/ProviderRole.java   |  16 +-
 .../apache/slider/providers/ProviderUtils.java  |  15 -
 .../server/appmaster/RoleLaunchService.java     |   1 -
 .../server/appmaster/SliderAppMaster.java       |  30 +-
 .../actions/MonitorComponentInstances.java      |  37 ++
 .../slider/server/appmaster/state/AppState.java |  84 +++-
 .../server/appmaster/state/RoleStatus.java      |   6 -
 .../slider/server/servicemonitor/HttpProbe.java |  44 +-
 .../server/servicemonitor/MonitorKeys.java      | 251 +----------
 .../server/servicemonitor/MonitorUtils.java     |  67 +--
 .../slider/server/servicemonitor/PortProbe.java |  74 ++-
 .../slider/server/servicemonitor/Probe.java     |  66 ++-
 .../servicemonitor/ProbeFailedException.java    |  32 --
 .../ProbeInterruptedException.java              |  29 --
 .../server/servicemonitor/ProbePhase.java       |  56 ---
 .../servicemonitor/ProbeReportHandler.java      |  79 ----
 .../server/servicemonitor/ProbeStatus.java      |  14 -
 .../server/servicemonitor/ProbeWorker.java      | 446 -------------------
 .../server/servicemonitor/ReportingLoop.java    | 265 -----------
 .../slider/util/RestApiErrorMessages.java       |   4 +
 .../org/apache/slider/util/ServiceApiUtil.java  |  80 ++++
 .../appstate/TestMockAppStateDependencies.java  | 162 +++++++
 .../appstate/TestMockAppStateUniqueNames.java   |   4 -
 ...estRoleHistoryOutstandingRequestTracker.java |   2 +-
 .../server/servicemonitor/TestPortProbe.java    |  11 +-
 .../apache/slider/utils/TestServiceApiUtil.java |  51 +++
 30 files changed, 649 insertions(+), 1368 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index f8ed4d5..05aad32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -297,18 +297,21 @@ definitions:
   ReadinessCheck:
     description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every application is different. Hence the need for a simple interface, with scope to support advanced usecases.
     required:
-    - uri
+    - type
     properties:
       type:
         type: string
         description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).
         enum:
           - HTTP
-      uri:
-        type: string
-        description: Fully qualified REST uri endpoint.
+          - PORT
+      props:
+        type: object
+        description: A blob of key value pairs that will be used to configure the check.
+        additionalProperties:
+          type: string
       artifact:
-        description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits application owners who can run applications without any packaging modifications. Note, artifacts of type docker only is supported for now.
+        description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits application owners who can run applications without any packaging modifications. Note, artifacts of type docker only is supported for now. NOT IMPLEMENTED YET
         $ref: '#/definitions/Artifact'
   Configuration:
     description: Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
index f690f5a..0e3b535 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
@@ -196,4 +196,15 @@ public interface InternalKeys {
    * default value: {@value}
    */
   int DEFAULT_ESCALATION_CHECK_INTERVAL = 30;
+
+
+  /**
+   * interval between readiness checks: {@value}
+   */
+  String MONITOR_INTERVAL = "monitor.interval.seconds";
+
+  /**
+   * default value: {@value}
+   */
+  int DEFAULT_MONITOR_INTERVAL = 30;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
index 03751e1..ad66a97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/StateValues.java
@@ -19,13 +19,15 @@
 package org.apache.slider.api;
 
 /**
- * Enumeration of state values
+ * Enumeration of state values.
  */
 public class StateValues {
 
+  private StateValues() {}
+
   /**
    * Specification is incomplete & cannot
-   * be used: {@value}
+   * be used: {@value}.
    */
   public static final int STATE_INCOMPLETE = 0;
 
@@ -42,12 +44,20 @@ public class StateValues {
    */
   public static final int STATE_LIVE = 3;
   /**
-   * Stopped
+   * Not ready.
+   */
+  public static final int STATE_NOT_READY = 4;
+  /**
+   * Ready.
+   */
+  public static final int STATE_READY = 5;
+  /**
+   * Stopped.
    */
-  public static final int STATE_STOPPED = 4;
+  public static final int STATE_STOPPED = 99;
   /**
-   * destroyed
+   * Destroyed.
    */
-  public static final int STATE_DESTROYED = 5;
+  public static final int STATE_DESTROYED = 100;
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
index 00bf29c..b3c85bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
@@ -21,6 +21,8 @@ import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 
 import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Objects;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
@@ -39,7 +41,8 @@ public class ReadinessCheck implements Serializable {
   private static final long serialVersionUID = -3836839816887186801L;
 
   public enum TypeEnum {
-    HTTP("HTTP");
+    HTTP("HTTP"),
+    PORT("PORT");
 
     private String value;
 
@@ -55,7 +58,7 @@ public class ReadinessCheck implements Serializable {
   }
 
   private TypeEnum type = null;
-  private String uri = null;
+  private Map<String, String> props = new HashMap<String, String>();
   private Artifact artifact = null;
 
   /**
@@ -77,22 +80,27 @@ public class ReadinessCheck implements Serializable {
     this.type = type;
   }
 
-  /**
-   * Fully qualified REST uri endpoint.
-   **/
-  public ReadinessCheck uri(String uri) {
-    this.uri = uri;
+  public ReadinessCheck props(Map<String, String> props) {
+    this.props = props;
+    return this;
+  }
+
+  public ReadinessCheck putPropsItem(String key, String propsItem) {
+    this.props.put(key, propsItem);
     return this;
   }
 
-  @ApiModelProperty(example = "null", required = true, value = "Fully qualified REST uri endpoint.")
-  @JsonProperty("uri")
-  public String getUri() {
-    return uri;
+  /**
+   * A blob of key value pairs that will be used to configure the check.
+   * @return props
+   **/
+  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be used to configure the check.")
+  public Map<String, String> getProps() {
+    return props;
   }
 
-  public void setUri(String uri) {
-    this.uri = uri;
+  public void setProps(Map<String, String> props) {
+    this.props = props;
   }
 
   /**
@@ -128,23 +136,24 @@ public class ReadinessCheck implements Serializable {
       return false;
     }
     ReadinessCheck readinessCheck = (ReadinessCheck) o;
-    return Objects.equals(this.type, readinessCheck.type)
-        && Objects.equals(this.uri, readinessCheck.uri)
-        && Objects.equals(this.artifact, readinessCheck.artifact);
+    return Objects.equals(this.type, readinessCheck.type) &&
+        Objects.equals(this.props, readinessCheck.props) &&
+        Objects.equals(this.artifact, readinessCheck.artifact);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(type, uri, artifact);
+    return Objects.hash(type, props, artifact);
   }
 
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("class ReadinessCheck {\n");
 
     sb.append("    type: ").append(toIndentedString(type)).append("\n");
-    sb.append("    uri: ").append(toIndentedString(uri)).append("\n");
+    sb.append("    props: ").append(toIndentedString(props)).append("\n");
     sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
     sb.append("}");
     return sb.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index 182e956..6fd85bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -21,6 +21,8 @@ package org.apache.slider.providers;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Component;
 import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.servicemonitor.MonitorUtils;
+import org.apache.slider.server.servicemonitor.Probe;
 
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
@@ -34,7 +36,6 @@ import java.util.concurrent.atomic.AtomicLong;
  */
 public final class ProviderRole {
   public final String name;
-  public final String group;
   public final int id;
   public int placementPolicy;
   public int nodeFailureThreshold;
@@ -43,6 +44,8 @@ public final class ProviderRole {
   public final Component component;
   public AtomicLong componentIdCounter = null;
   public Queue<RoleInstance> failedInstances = new ConcurrentLinkedQueue<>();
+  public Probe probe;
+
   public ProviderRole(String name, int id) {
     this(name,
         id,
@@ -69,7 +72,6 @@ public final class ProviderRole {
       long placementTimeoutSeconds,
       String labelExpression) {
     this(name,
-        name,
         id,
         policy,
         nodeFailureThreshold,
@@ -81,7 +83,6 @@ public final class ProviderRole {
   /**
    * Create a provider role with a role group
    * @param name role/component name
-   * @param group role/component group
    * @param id ID. This becomes the YARN priority
    * @param policy placement policy
    * @param nodeFailureThreshold threshold for node failures (within a reset interval)
@@ -89,15 +90,10 @@ public final class ProviderRole {
    * @param placementTimeoutSeconds for lax placement, timeout in seconds before
    * @param labelExpression label expression for requests; may be null
    */
-  public ProviderRole(String name, String group, int id, int policy,
+  public ProviderRole(String name, int id, int policy,
       int nodeFailureThreshold, long placementTimeoutSeconds,
       String labelExpression, Component component) {
     this.name = name;
-    if (group == null) {
-      this.group = name;
-    } else {
-      this.group = group;
-    }
     this.id = id;
     this.placementPolicy = policy;
     this.nodeFailureThreshold = nodeFailureThreshold;
@@ -107,6 +103,7 @@ public final class ProviderRole {
     if(component.getUniqueComponentSupport()) {
       componentIdCounter = new AtomicLong(0);
     }
+    this.probe = MonitorUtils.getProbe(component.getReadinessCheck());
   }
 
 
@@ -132,7 +129,6 @@ public final class ProviderRole {
   public String toString() {
     final StringBuilder sb = new StringBuilder("ProviderRole{");
     sb.append("name='").append(name).append('\'');
-    sb.append(", group=").append(group);
     sb.append(", id=").append(id);
     sb.append(", placementPolicy=").append(placementPolicy);
     sb.append(", nodeFailureThreshold=").append(nodeFailureThreshold);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index d58ecaa..0da535e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -254,17 +254,6 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     }
   }
 
-  public static void addEnvForSubstitution(Map<String, String> env,
-      Map<String, String> tokensForSubstitution) {
-    if (env == null || env.isEmpty() || tokensForSubstitution == null
-        || tokensForSubstitution.isEmpty()) {
-      return;
-    }
-    for (Map.Entry<String, String> entry : env.entrySet()) {
-      tokensForSubstitution.put($(entry.getKey()), entry.getValue());
-    }
-  }
-
   // 1. Create all config files for a component on hdfs for localization
   // 2. Add the config file to localResource
   public synchronized void createConfigFileAndAddLocalResource(
@@ -284,10 +273,6 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
       log.info("Component instance conf dir already exists: " + compInstanceDir);
     }
 
-    // add Configuration#env into tokens substitution
-    addEnvForSubstitution(component.getConfiguration().getEnv(),
-        tokensForSubstitution);
-
     log.info("Tokens substitution for component: " + roleInstance
         .getCompInstanceName() + System.lineSeparator()
         + tokensForSubstitution);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index aa84940..f1b07f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -133,7 +133,6 @@ public class RoleLaunchService
       return "RoleLauncher{" +
              "container=" + container.getId() +
              ", containerRole='" + role.name + '\'' +
-             ", containerGroup='" + role.group + '\'' +
              '}';
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 0c3fcea..e565849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -117,6 +117,7 @@ import org.apache.slider.server.appmaster.actions.ActionStopSlider;
 import org.apache.slider.server.appmaster.actions.ActionUpgradeContainers;
 import org.apache.slider.server.appmaster.actions.AsyncAction;
 import org.apache.slider.server.appmaster.actions.EscalateOutstandingRequests;
+import org.apache.slider.server.appmaster.actions.MonitorComponentInstances;
 import org.apache.slider.server.appmaster.actions.QueueExecutor;
 import org.apache.slider.server.appmaster.actions.QueueService;
 import org.apache.slider.server.appmaster.actions.RegisterComponentInstance;
@@ -340,7 +341,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * ProviderService of this cluster
    */
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-  private List<ProviderService> providers = new ArrayList<>();
+  private Set<ProviderService> providers = new HashSet<>();
 
   /**
    * The YARN registry service
@@ -868,6 +869,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     scheduleFailureWindowResets(application.getConfiguration());
     scheduleEscalation(application.getConfiguration());
+    scheduleMonitoring(application.getConfiguration());
 
     try {
       // schedule YARN Registry registration
@@ -1644,9 +1646,22 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         new RenewingAction<>(escalate, seconds, seconds, TimeUnit.SECONDS, 0);
     actionQueues.renewing("escalation", renew);
   }
-  
+
   /**
-   * Look at where the current node state is -and whether it should be changed
+   * Schedule monitor action
+   */
+  private void scheduleMonitoring(
+      org.apache.slider.api.resource.Configuration conf) {
+    MonitorComponentInstances monitor = new MonitorComponentInstances();
+    long seconds = conf.getPropertyLong(InternalKeys.MONITOR_INTERVAL,
+        InternalKeys.DEFAULT_MONITOR_INTERVAL);
+    RenewingAction<MonitorComponentInstances> renew =
+        new RenewingAction<>(monitor, seconds, seconds, TimeUnit.SECONDS, 0);
+    actionQueues.renewing("monitoring", renew);
+  }
+
+  /**
+   * Look at where the current node state is and whether it should be changed.
    * @param reason reason for operation
    */
   private synchronized void reviewRequestAndReleaseNodes(String reason) {
@@ -1711,6 +1726,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     execute(operations);
   }
 
+  public void monitorComponentInstances() {
+    // TODO use health checks?
+    // TODO publish timeline events for monitoring changes?
+    if (appState.monitorComponentInstances()) {
+      // monitoring change
+      reviewRequestAndReleaseNodes("monitoring change");
+    }
+  }
+
 
   /**
    * Shutdown operation: release all containers

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/MonitorComponentInstances.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/MonitorComponentInstances.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/MonitorComponentInstances.java
new file mode 100644
index 0000000..f7aa871
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/MonitorComponentInstances.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.actions;
+
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.apache.slider.server.appmaster.state.AppState;
+
+/**
+ * Execute readiness checks on component instances.
+ */
+public class MonitorComponentInstances extends AsyncAction {
+
+  public MonitorComponentInstances() {
+    super("MonitorComponentInstance");
+  }
+
+  @Override
+  public void execute(SliderAppMaster appMaster, QueueAccess queueService,
+      AppState appState) throws Exception {
+    appMaster.monitorComponentInstances();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 1e1b377..52d67f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -73,6 +73,7 @@ import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
 import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
 import org.apache.slider.server.appmaster.operations.UpdateBlacklistOperation;
 import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
+import org.apache.slider.util.ServiceApiUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -344,16 +345,18 @@ public class AppState {
         DEFAULT_NODE_FAILURE_THRESHOLD);
     initGlobalTokensForSubstitute(binding);
 
-    //build the initial component list
+    // build the initial component list
+    Collection<Component> sortedComponents = ServiceApiUtil
+        .sortByDependencies(app.getComponents());
     int priority = 1;
-    for (Component component : app.getComponents()) {
+    for (Component component : sortedComponents) {
       priority = getNewPriority(priority);
       String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
       }
       log.info("Adding component: " + name);
-      createComponent(name, name, component, priority++);
+      createComponent(name, component, priority++);
     }
 
     //then pick up the requirements
@@ -433,8 +436,8 @@ public class AppState {
             });
   }
 
-  public ProviderRole createComponent(String name, String group,
-      Component component, int priority) throws BadConfigException {
+  public ProviderRole createComponent(String name, Component component,
+      int priority) throws BadConfigException {
     org.apache.slider.api.resource.Configuration conf =
         component.getConfiguration();
     long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY,
@@ -446,7 +449,7 @@ public class AppState {
     String label = conf.getProperty(YARN_LABEL_EXPRESSION,
         DEF_YARN_LABEL_EXPRESSION);
     ProviderRole newRole =
-        new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
+        new ProviderRole(name, priority, (int)placementPolicy, threshold,
             placementTimeout, label, component);
     buildRole(newRole, component);
     log.info("Created a new role " + newRole);
@@ -1535,7 +1538,8 @@ public class AppState {
       allOperations.add(blacklistOperation);
     }
     for (RoleStatus roleStatus : getRoleStatusMap().values()) {
-      if (!roleStatus.isExcludeFromFlexing()) {
+      if (!roleStatus.isExcludeFromFlexing() &&
+          areDependenciesReady(roleStatus)) {
         List<AbstractRMOperation> operations = reviewOneRole(roleStatus);
         allOperations.addAll(operations);
       }
@@ -1543,6 +1547,47 @@ public class AppState {
     return allOperations;
   }
 
+  @VisibleForTesting
+  public boolean areDependenciesReady(RoleStatus roleStatus) {
+    List<String> dependencies = roleStatus.getProviderRole().component
+        .getDependencies();
+    if (SliderUtils.isEmpty(dependencies)) {
+      return true;
+    }
+    for (String dependency : dependencies) {
+      ProviderRole providerRole = roles.get(dependency);
+      if (providerRole == null) {
+        log.error("Couldn't find dependency {} for {} (should never happen)",
+            dependency, roleStatus.getName());
+        continue;
+      }
+      RoleStatus other = getRoleStatusMap().get(providerRole.id);
+      if (other.getRunning() < other.getDesired()) {
+        log.info("Dependency {} not satisfied for {}, only {} of {} instances" +
+            " running", dependency, roleStatus.getName(), other.getRunning(),
+            other.getDesired());
+        return false;
+      }
+      if (providerRole.probe == null) {
+        continue;
+      }
+      List<RoleInstance> dependencyInstances = enumLiveNodesInRole(
+          providerRole.name);
+      if (dependencyInstances.size() < other.getDesired()) {
+        log.info("Dependency {} not satisfied for {}, only {} of {} instances" +
+                " live", dependency, roleStatus.getName(),
+            dependencyInstances.size(), other.getDesired());
+        return false;
+      }
+      for (RoleInstance instance : dependencyInstances) {
+        if (instance.state != STATE_READY) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
   /**
    * Check the "recent" failure threshold for a role
    * @param role role to examine
@@ -1620,6 +1665,31 @@ public class AppState {
     return operations;
   }
 
+  public synchronized boolean monitorComponentInstances() {
+    boolean hasChanged = false;
+    for (RoleInstance instance : getLiveContainers().values()) {
+      if (instance.providerRole.probe == null) {
+        continue;
+      }
+      boolean ready = instance.providerRole.probe.ping(instance).isSuccess();
+      if (ready) {
+        if (instance.state != STATE_READY) {
+          instance.state = STATE_READY;
+          hasChanged = true;
+          log.info("State of {} changed to ready", instance.role);
+        }
+      } else {
+        if (instance.state == STATE_READY) {
+          instance.state = STATE_NOT_READY;
+          hasChanged = true;
+          log.info("State of {} changed from ready to not ready", instance
+              .role);
+        }
+      }
+    }
+    return hasChanged;
+  }
+
   /**
    * Look at the allocation status of one role, and trigger add/release
    * actions if the number of desired role instances doesn't equal

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
index 9842481..3d9a8f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
@@ -45,7 +45,6 @@ import java.util.Map;
 public final class RoleStatus implements MetricSet {
 
   private final String name;
-  private final String group;
 
   /**
    * Role priority
@@ -66,7 +65,6 @@ public final class RoleStatus implements MetricSet {
   public RoleStatus(ProviderRole providerRole) {
     this.providerRole = providerRole;
     this.name = providerRole.name;
-    this.group = providerRole.group;
     this.key = providerRole.id;
     componentMetrics =
         SliderMetrics.register(this.name, "Metrics for component " + this.name);
@@ -95,10 +93,6 @@ public final class RoleStatus implements MetricSet {
     return name;
   }
 
-  public String getGroup() {
-    return group;
-  }
-
   public int getKey() {
     return key;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java
index 9c14ca7..f6b03d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java
@@ -18,30 +18,50 @@
 package org.apache.slider.server.servicemonitor;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Map;
 
 public class HttpProbe extends Probe {
   protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class);
 
-  private final URL url;
+  private static final String HOST_TOKEN = "${THIS_HOST}";
+
+  private final String urlString;
   private final int timeout;
   private final int min, max;
 
 
-  public HttpProbe(URL url, int timeout, int min, int max, Configuration conf) throws IOException {
+  public HttpProbe(String url, int timeout, int min, int max, Configuration
+      conf) {
     super("Http probe of " + url + " [" + min + "-" + max + "]", conf);
-    this.url = url;
+    this.urlString = url;
     this.timeout = timeout;
     this.min = min;
     this.max = max;
   }
 
-  public static HttpURLConnection getConnection(URL url, int timeout) throws IOException {
+  public static HttpProbe create(Map<String, String> props)
+      throws IOException {
+    String urlString = getProperty(props, WEB_PROBE_URL, null);
+    new URL(urlString);
+    int timeout = getPropertyInt(props, WEB_PROBE_CONNECT_TIMEOUT,
+        WEB_PROBE_CONNECT_TIMEOUT_DEFAULT);
+    int minSuccess = getPropertyInt(props, WEB_PROBE_MIN_SUCCESS,
+        WEB_PROBE_MIN_SUCCESS_DEFAULT);
+    int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS,
+        WEB_PROBE_MAX_SUCCESS_DEFAULT);
+    return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null);
+  }
+
+
+  private static HttpURLConnection getConnection(URL url, int timeout) throws
+      IOException {
     HttpURLConnection connection = (HttpURLConnection) url.openConnection();
     connection.setInstanceFollowRedirects(true);
     connection.setConnectTimeout(timeout);
@@ -49,13 +69,17 @@ public class HttpProbe extends Probe {
   }
   
   @Override
-  public ProbeStatus ping(boolean livePing) {
+  public ProbeStatus ping(RoleInstance roleInstance) {
     ProbeStatus status = new ProbeStatus();
+    String ip = roleInstance.ip;
+    if (ip == null) {
+      status.fail(this, new IOException("IP is not available yet"));
+      return status;
+    }
+
     HttpURLConnection connection = null;
     try {
-      if (log.isDebugEnabled()) {
-        // LOG.debug("Fetching " + url + " with timeout " + timeout);
-      }
+      URL url = new URL(urlString.replace(HOST_TOKEN, ip));
       connection = getConnection(url, this.timeout);
       int rc = connection.getResponseCode();
       if (rc < min || rc > max) {
@@ -66,8 +90,8 @@ public class HttpProbe extends Probe {
       } else {
         status.succeed(this);
       }
-    } catch (IOException e) {
-      String error = "Probe " + url + " failed: " + e;
+    } catch (Throwable e) {
+      String error = "Probe " + urlString + " failed for IP " + ip + ": " + e;
       log.info(error, e);
       status.fail(this,
                   new IOException(error, e));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorKeys.java
index f7bdd4a..e97ab43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorKeys.java
@@ -23,257 +23,44 @@ package org.apache.slider.server.servicemonitor;
 public interface MonitorKeys {
 
   /**
-   * Prefix of all other configuration options: {@value}
+   * Port probing key : port to attempt to create a TCP connection to {@value}.
    */
-  String MONITOR_KEY_PREFIX = "service.monitor.";
-
-
-  /**
-   * Classname of the reporter Key: {@value}
-   */
-  String MONITOR_REPORTER =
-    MONITOR_KEY_PREFIX + "report.classname";
-
-  /**
-   * Interval in milliseconds between reporting health status to the reporter
-   * Key: {@value}
-   */
-  String MONITOR_REPORT_INTERVAL =
-    MONITOR_KEY_PREFIX + "report.interval";
-
-  /**
-   * Time in millis between the last probing cycle ending and the new one
-   * beginning. Key: {@value}
-   */
-  String MONITOR_PROBE_INTERVAL =
-    MONITOR_KEY_PREFIX + "probe.interval";
-
-  /**
-   * How long in milliseconds does the probing loop have to be blocked before
-   * that is considered a liveness failure Key: {@value}
-   */
-  String MONITOR_PROBE_TIMEOUT =
-    MONITOR_KEY_PREFIX + "probe.timeout";
-
-  /**
-   * How long in milliseconds does the probing loop have to be blocked before
-   * that is considered a liveness failure Key: {@value}
-   */
-  String MONITOR_BOOTSTRAP_TIMEOUT =
-    MONITOR_KEY_PREFIX + "bootstrap.timeout";
-
-
-  /**
-   * does the monitor depend on DFS being live
-   */
-  String MONITOR_DEPENDENCY_DFSLIVE =
-    MONITOR_KEY_PREFIX + "dependency.dfslive";
-
-
-  /**
-   * default timeout for the entire bootstrap phase {@value}
-   */
-
-  int BOOTSTRAP_TIMEOUT_DEFAULT = 60000;
-
-
-  /**
-   * Default value if the key is not in the config file: {@value}
-   */
-  int REPORT_INTERVAL_DEFAULT = 10000;
+  String PORT_PROBE_PORT = "port";
   /**
-   * Default value if the key is not in the config file: {@value}
+   * Port probing key : timeout for the the connection attempt {@value}.
    */
-  int PROBE_INTERVAL_DEFAULT = 10000;
+  String PORT_PROBE_CONNECT_TIMEOUT = "timeout";
   /**
-   * Default value if the key is not in the config file: {@value}
+   * Port probing default : timeout for the connection attempt {@value}.
    */
-  int PROBE_TIMEOUT_DEFAULT = 60000;
-
-  /**
-   * Port probe enabled/disabled flag Key: {@value}
-   */
-  String PORT_PROBE_ENABLED =
-    MONITOR_KEY_PREFIX + "portprobe.enabled";
-
-
-  /**
-   * Port probing key : port to attempt to create a TCP connection to {@value}
-   */
-  String PORT_PROBE_PORT =
-    MONITOR_KEY_PREFIX + "portprobe.port";
-
-  /**
-   * Port probing key : port to attempt to create a TCP connection to {@value}
-   */
-  String PORT_PROBE_HOST =
-    MONITOR_KEY_PREFIX + "portprobe.host";
-
-
-  /**
-   * Port probing key : timeout of the connection attempt {@value}
-   */
-  String PORT_PROBE_CONNECT_TIMEOUT =
-    MONITOR_KEY_PREFIX + "portprobe.connect.timeout";
-
-  /**
-   * Port probing key : bootstrap timeout -how long in milliseconds should the
-   * port probing take to connect before the failure to connect is considered a
-   * liveness failure. That is: how long should the IPC port take to come up?
-   * {@value}
-   */
-  String PORT_PROBE_BOOTSTRAP_TIMEOUT =
-    MONITOR_KEY_PREFIX + "portprobe.bootstrap.timeout";
-
-
-  /**
-   * default timeout for port probes {@value}
-   */
-  int PORT_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT = 60000;
-
-  /**
-   * default value for port probe connection attempts {@value}
-   */
-
   int PORT_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
 
-
-  /**
-   * default port for probes {@value}
-   */
-  int DEFAULT_PROBE_PORT = 8020;
-
-
   /**
-   * default host for probes {@value}
+   * Web probing key : URL {@value}.
    */
-  String DEFAULT_PROBE_HOST = "localhost";
-
-
+  String WEB_PROBE_URL = "url";
   /**
-   * Probe enabled/disabled flag Key: {@value}
+   * Web probing key : min success code {@value}.
    */
-  String LS_PROBE_ENABLED =
-    MONITOR_KEY_PREFIX + "lsprobe.enabled";
-
+  String WEB_PROBE_MIN_SUCCESS = "min.success";
   /**
-   * Probe path for LS operation Key: {@value}
+   * Web probing key : max success code {@value}.
    */
-  String LS_PROBE_PATH =
-    MONITOR_KEY_PREFIX + "lsprobe.path";
-
+  String WEB_PROBE_MAX_SUCCESS = "max.success";
   /**
-   * Default path for LS operation Key: {@value}
+   * Web probing default : min successful response code {@value}.
    */
-  String LS_PROBE_DEFAULT = "/";
-
+  int WEB_PROBE_MIN_SUCCESS_DEFAULT = 200;
   /**
-   * Port probing key : bootstrap timeout -how long in milliseconds should the
-   * port probing take to connect before the failure to connect is considered a
-   * liveness failure. That is: how long should the IPC port take to come up?
-   * {@value}
+   * Web probing default : max successful response code {@value}.
    */
-  String LS_PROBE_BOOTSTRAP_TIMEOUT =
-    MONITOR_KEY_PREFIX + "lsprobe.bootstrap.timeout";
-
-
+  int WEB_PROBE_MAX_SUCCESS_DEFAULT = 299;
   /**
-   * default timeout for port probes {@value}
+   * Web probing key : timeout for the connection attempt {@value}
    */
-
-  int LS_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT = PORT_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT;
-
-
-  /**
-   * Probe enabled/disabled flag Key: {@value}
-   */
-  String WEB_PROBE_ENABLED =
-    MONITOR_KEY_PREFIX + "webprobe.enabled";
-
+  String WEB_PROBE_CONNECT_TIMEOUT = "timeout";
   /**
-   * Probe URL Key: {@value}
+   * Port probing default : timeout for the connection attempt {@value}.
    */
-  String WEB_PROBE_URL =
-    MONITOR_KEY_PREFIX + "webprobe.url";
-
-  /**
-   * Default path for web probe Key: {@value}
-   */
-  String WEB_PROBE_DEFAULT_URL = "http://localhost:50070/";
-
-  /**
-   * min error code Key: {@value}
-   */
-  String WEB_PROBE_MIN =
-    MONITOR_KEY_PREFIX + "webprobe.min";
-  /**
-   * min error code Key: {@value}
-   */
-  String WEB_PROBE_MAX =
-    MONITOR_KEY_PREFIX + "webprobe.max";
-
-
-  /**
-   * Port probing key : timeout of the connection attempt {@value}
-   */
-  String WEB_PROBE_CONNECT_TIMEOUT =
-    MONITOR_KEY_PREFIX + "webprobe.connect.timeout";
-
-  /**
-   * Default HTTP response code expected from the far end for
-   * the endpoint to be considered live.
-   */
-  int WEB_PROBE_DEFAULT_CODE = 200;
-
-  /**
-   * Port probing key : bootstrap timeout -how long in milliseconds should the
-   * port probing take to connect before the failure to connect is considered a
-   * liveness failure. That is: how long should the IPC port take to come up?
-   * {@value}
-   */
-  String WEB_PROBE_BOOTSTRAP_TIMEOUT =
-    MONITOR_KEY_PREFIX + "webprobe.bootstrap.timeout";
-
-
-  /**
-   * default timeout for port probes {@value}
-   */
-
-  int WEB_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT = PORT_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT;
-
-  /**
-   * Probe enabled/disabled flag Key: {@value}
-   */
-  String JT_PROBE_ENABLED =
-    MONITOR_KEY_PREFIX + "jtprobe.enabled";
-
-  /**
-   * Port probing key : bootstrap timeout -how long in milliseconds should the
-   * port probing take to connect before the failure to connect is considered a
-   * liveness failure. That is: how long should the IPC port take to come up?
-   * {@value}
-   */
-  String JT_PROBE_BOOTSTRAP_TIMEOUT =
-    MONITOR_KEY_PREFIX + "jtprobe.bootstrap.timeout";
-
-
-  /**
-   * default timeout for port probes {@value}
-   */
-
-  int JT_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT = PORT_PROBE_BOOTSTRAP_TIMEOUT_DEFAULT;
-
-
-  /**
-   * Probe enabled/disabled flag Key: {@value}
-   */
-  String PID_PROBE_ENABLED =
-    MONITOR_KEY_PREFIX + "pidprobe.enabled";
-
-  /**
-   * PID probing key : pid to attempt to create a TCP connection to {@value}
-   */
-  String PID_PROBE_PIDFILE =
-    MONITOR_KEY_PREFIX + "pidprobe.pidfile";
-
+  int WEB_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorUtils.java
index a4447e3..1e5c94c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/MonitorUtils.java
@@ -17,25 +17,19 @@
 
 package org.apache.slider.server.servicemonitor;
 
+import org.apache.slider.api.resource.ReadinessCheck;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
 import java.util.Formatter;
-import java.util.List;
 import java.util.Locale;
-import java.util.Map;
-import java.util.TreeSet;
 
 /**
  * Various utils to work with the monitor
  */
 public final class MonitorUtils {
-  protected static final Logger log = LoggerFactory.getLogger(MonitorUtils.class);
+  protected static final Logger LOG = LoggerFactory.getLogger(MonitorUtils
+      .class);
 
   private MonitorUtils() {
   }
@@ -45,25 +39,6 @@ public final class MonitorUtils {
   }
 
   /**
-   * Convert the arguments -including dropping any empty strings that creep in
-   * @param args arguments
-   * @return a list view with no empty strings
-   */
-  public static List<String> prepareArgs(String[] args) {
-    List<String> argsList = new ArrayList<String>(args.length);
-    StringBuilder argsStr = new StringBuilder("Arguments: [");
-    for (String arg : args) {
-      argsStr.append('"').append(arg).append("\" ");
-      if (!arg.isEmpty()) {
-        argsList.add(arg);
-      }
-    }
-    argsStr.append(']');
-    log.debug(argsStr.toString());
-    return argsList;
-  }
-
-  /**
    * Convert milliseconds to human time -the exact format is unspecified
    * @param milliseconds a time in milliseconds
    * @return a time that is converted to human intervals
@@ -85,25 +60,25 @@ public final class MonitorUtils {
     return sb.toString();
   }
 
-  public static InetSocketAddress getURIAddress(URI uri) {
-    String host = uri.getHost();
-    int port = uri.getPort();
-    return new InetSocketAddress(host, port);
-  }
-
-
-  /**
-   * Get the localhost -may be null
-   * @return the localhost if known
-   */
-  public static InetAddress getLocalHost() {
-    InetAddress localHost;
+  public static Probe getProbe(ReadinessCheck readinessCheck) {
+    if (readinessCheck == null) {
+      return null;
+    }
+    if (readinessCheck.getType() == null) {
+      return null;
+    }
     try {
-      localHost = InetAddress.getLocalHost();
-    } catch (UnknownHostException e) {
-      localHost = null;
+      switch (readinessCheck.getType()) {
+      case HTTP:
+        return HttpProbe.create(readinessCheck.getProps());
+      case PORT:
+        return PortProbe.create(readinessCheck.getProps());
+      default:
+        return null;
+      }
+    } catch (Throwable t) {
+      throw new IllegalArgumentException("Error creating readiness check " +
+          t);
     }
-    return localHost;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java
index b1ff792..252242f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java
@@ -17,91 +17,77 @@
 
 package org.apache.slider.server.servicemonitor;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.util.Map;
 
 /**
- * Probe for a port being open
+ * Probe for a port being open.
  */
 public class PortProbe extends Probe {
   protected static final Logger log = LoggerFactory.getLogger(PortProbe.class);
-  private final String host;
   private final int port;
   private final int timeout;
 
-  public PortProbe(String host, int port, int timeout, String name, Configuration conf)
-      throws IOException {
-    super("Port probe " + name + " " + host + ":" + port + " for " + timeout + "ms",
-          conf);
-    this.host = host;
+  public PortProbe(int port, int timeout) {
+    super("Port probe of " + port + " for " + timeout + "ms", null);
     this.port = port;
     this.timeout = timeout;
   }
 
-  public static PortProbe createPortProbe(Configuration conf,
-                                          String hostname,
-                                          int port) throws IOException {
-    PortProbe portProbe = new PortProbe(hostname,
-                                        port,
-                                        conf.getInt(
-                                          PORT_PROBE_CONNECT_TIMEOUT,
-                                          PORT_PROBE_CONNECT_TIMEOUT_DEFAULT),
-                                        "",
-                                        conf);
-
-    return portProbe;
-  }
+  public static PortProbe create(Map<String, String> props)
+      throws IOException {
+    int port = getPropertyInt(props, PORT_PROBE_PORT, null);
 
-  @Override
-  public void init() throws IOException {
     if (port >= 65536) {
-      throw new IOException("Port is out of range: " + port);
-    }
-    InetAddress target;
-    if (host != null) {
-      log.debug("looking up host " + host);
-      target = InetAddress.getByName(host);
-    } else {
-      log.debug("Host is null, retrieving localhost address");
-      target = InetAddress.getLocalHost();
+      throw new IOException(PORT_PROBE_PORT + " " + port + " is out of " +
+          "range");
     }
-    log.info("Checking " + target + ":" + port);
+
+    int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT,
+        PORT_PROBE_CONNECT_TIMEOUT_DEFAULT);
+
+    return new PortProbe(port, timeout);
   }
 
   /**
    * Try to connect to the (host,port); a failure to connect within
-   * the specified timeout is a failure
-   * @param livePing is the ping live: true for live; false for boot time
+   * the specified timeout is a failure.
+   * @param roleInstance role instance
    * @return the outcome
    */
   @Override
-  public ProbeStatus ping(boolean livePing) {
+  public ProbeStatus ping(RoleInstance roleInstance) {
     ProbeStatus status = new ProbeStatus();
-    InetSocketAddress sockAddr = new InetSocketAddress(host, port);
+
+    String ip = roleInstance.ip;
+    if (ip == null) {
+      status.fail(this, new IOException("IP is not available yet"));
+      return status;
+    }
+
+    InetSocketAddress sockAddr = new InetSocketAddress(ip, port);
     Socket socket = new Socket();
     try {
       if (log.isDebugEnabled()) {
-        log.debug("Connecting to " + sockAddr.toString() + " connection-timeout=" +
-                  MonitorUtils.millisToHumanTime(timeout));
+        log.debug("Connecting to " + sockAddr.toString() + "timeout=" +
+            MonitorUtils.millisToHumanTime(timeout));
       }
       socket.connect(sockAddr, timeout);
       status.succeed(this);
-    } catch (IOException e) {
+    } catch (Throwable e) {
       String error = "Probe " + sockAddr + " failed: " + e;
       log.debug(error, e);
-      status.fail(this,
-                  new IOException(error, e));
+      status.fail(this, new IOException(error, e));
     } finally {
       IOUtils.closeSocket(socket);
     }
     return status;
-
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java
index be4b5ef..e149442 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java
@@ -17,9 +17,12 @@
 
 package org.apache.slider.server.servicemonitor;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
 import java.io.IOException;
+import java.util.Map;
 
 /**
  * Base class of all probes.
@@ -29,19 +32,6 @@ public abstract class Probe implements MonitorKeys {
   protected final Configuration conf;
   private String name;
 
-  // =======================================================
-  /*
-   * These fields are all used by the probe loops
-   * to maintain state. Please Leave them alone.
-   */
-  public int successCount;
-  public int failureCount;
-  public long bootstrapStarted;
-  public long bootstrapFinished;
-  private boolean booted = false;
-
-  // =======================================================
-
   /**
    * Create a probe of a specific name
    *
@@ -65,11 +55,31 @@ public abstract class Probe implements MonitorKeys {
 
   @Override
   public String toString() {
-    return getName() +
-           " {" +
-           "successCount=" + successCount +
-           ", failureCount=" + failureCount +
-           '}';
+    return getName();
+  }
+
+  public static String getProperty(Map<String, String> props, String name,
+      String defaultValue) throws IOException {
+    String value = props.get(name);
+    if (StringUtils.isEmpty(value)) {
+      if (defaultValue == null) {
+        throw new IOException(name + " not specified");
+      }
+      return defaultValue;
+    }
+    return value;
+  }
+
+  public static int getPropertyInt(Map<String, String> props, String name,
+      Integer defaultValue) throws IOException {
+    String value = props.get(name);
+    if (StringUtils.isEmpty(value)) {
+      if (defaultValue == null) {
+        throw new IOException(name + " not specified");
+      }
+      return defaultValue;
+    }
+    return Integer.parseInt(value);
   }
 
   /**
@@ -83,25 +93,9 @@ public abstract class Probe implements MonitorKeys {
    * Ping the endpoint. All exceptions must be caught and included in the
    * (failure) status.
    *
-   * @param livePing is the ping live: true for live; false for boot time
+   * @param roleInstance instance to ping
    * @return the status
    */
-  public abstract ProbeStatus ping(boolean livePing);
+  public abstract ProbeStatus ping(RoleInstance roleInstance);
 
-  public void beginBootstrap() {
-    bootstrapStarted = System.currentTimeMillis();
-  }
-
-  public void endBootstrap() {
-    setBooted(true);
-    bootstrapFinished = System.currentTimeMillis();
-  }
-
-  public boolean isBooted() {
-    return booted;
-  }
-
-  public void setBooted(boolean booted) {
-    this.booted = booted;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeFailedException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeFailedException.java
deleted file mode 100644
index f09b848..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeFailedException.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-/**
- * An exception to raise on a probe failure
- */
-public class ProbeFailedException extends Exception {
-
-  public final ProbeStatus status;
-
-  public ProbeFailedException(String text, ProbeStatus status) {
-    super((text == null ? "Probe Failed" : (text + ": ")) + status, status.getThrown());
-    this.status = status;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeInterruptedException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeInterruptedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeInterruptedException.java
deleted file mode 100644
index 5a02f46..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeInterruptedException.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-/**
- * This exception is raised when the probe loop detects that it has been requested to stop
- *
- */
-public class ProbeInterruptedException extends Exception {
-
-  public ProbeInterruptedException() {
-    super("Probe Interrupted");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbePhase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbePhase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbePhase.java
deleted file mode 100644
index d87c81b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbePhase.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-/**
- * Probe phases. The names are for strings; the index is the order in which things happen;
- * -any state can got to terminating directly.
- */
-public enum ProbePhase {
-  INIT("Initializing", 0),
-  DEPENDENCY_CHECKING("Dependencies", 1),
-  BOOTSTRAPPING("Bootstrapping", 2),
-  LIVE("Live", 3),
-  TERMINATING("Terminating", 4);
-
-  private final String name;
-  private final int index;
-
-  ProbePhase(String name, int index) {
-    this.name = name;
-    this.index = index;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public int getIndex() {
-    return index;
-  }
-
-  /**
-   * How many phases are there?
-   */
-  public static final int PHASE_COUNT = TERMINATING.index + 1;
-
-  @Override
-  public String toString() {
-    return name;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeReportHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeReportHandler.java
deleted file mode 100644
index 36c20c8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeReportHandler.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-/**
- * This interface is for use by the Poll Workers to send events to the reporters.
- *
- * It is up the reporters what to do with the specific events.
- */
-public interface ProbeReportHandler {
-
-  /**
-   * The probe process has changed state. 
-   * @param probePhase the new process phrase
-   */
-  void probeProcessStateChange(ProbePhase probePhase);
-
-  /**
-   * Report a probe outcome
-   * @param phase the current phase of probing
-   * @param status the probe status
-   */
-  void probeResult(ProbePhase phase, ProbeStatus status);
-
-  /**
-   * A probe has failed
-   */
-  void probeFailure(ProbeFailedException exception);
-
-  /**
-   * A probe has just booted
-   * @param status probe status
-   */
-  void probeBooted(ProbeStatus status);
-
-  boolean commence(String name, String description);
-
-  void unregister();
-
-  /**
-   * A heartbeat event should be raised
-   * @param status the probe status
-   */
-  void heartbeat(ProbeStatus status);
-
-  /**
-   * A probe has timed out
-   * @param currentPhase the current execution phase
-   * @param probe the probe that timed out
-   * @param lastStatus the last status that was successfully received -which is implicitly 
-   * not the status of the timed out probe
-   * @param currentTime the current time
-   */
-  void probeTimedOut(ProbePhase currentPhase,
-                     Probe probe,
-                     ProbeStatus lastStatus,
-                     long currentTime);
-
-  /**
-   * Event to say that the live probe cycle completed so the entire
-   * system can be considered functional.
-   */
-  void liveProbeCycleCompleted();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeStatus.java
index b4deabc..24668bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeStatus.java
@@ -34,7 +34,6 @@ public final class ProbeStatus implements Serializable {
   private String message;
   private Throwable thrown;
   private transient Probe originator;
-  private ProbePhase probePhase;
 
   public ProbeStatus() {
   }
@@ -99,14 +98,6 @@ public final class ProbeStatus implements Serializable {
     this.thrown = thrown;
   }
 
-  public ProbePhase getProbePhase() {
-    return probePhase;
-  }
-
-  public void setProbePhase(ProbePhase probePhase) {
-    this.probePhase = probePhase;
-  }
-
   /**
    * Get the probe that generated this result. May be null
    * @return a possibly null reference to a probe
@@ -147,7 +138,6 @@ public final class ProbeStatus implements Serializable {
   public String toString() {
     LogEntryBuilder builder = new LogEntryBuilder("Probe Status");
     builder.elt("time", timestampText)
-           .elt("phase", probePhase)
            .elt("outcome", (success ? "success" : "failure"));
 
     if (success != realOutcome) {
@@ -161,10 +151,6 @@ public final class ProbeStatus implements Serializable {
     return builder.toString();
   }
 
-  public boolean inPhase(ProbePhase phase) {
-    return getProbePhase().equals(phase);
-  }
-
   /**
    * Flip the success bit on while the real outcome bit is kept false
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14b8371b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeWorker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeWorker.java
deleted file mode 100644
index f64ec8d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/ProbeWorker.java
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This is the entry point to do work. A list of probes is taken in, in order of
- * booting. Once live they go to the live probes list.
- *
- * The dependency probes are a set of probes for dependent services, all of which
- * must be live before boot probes commence.
- *
- * The boot probes are executed and are allowed to fail; failure is interpreted as "not yet live"
- *
- * Once all boot probes are live, the live list is used for probes; these must not fail.
- *
- * There is no timeout on dependency probe bootstrap time, because of the notion that
- * restarting this service will have no effect on the dependencies. 
- */
-
-public class ProbeWorker implements Runnable {
-  protected static final Logger log = LoggerFactory.getLogger(ProbeWorker.class);
-
-  public static final String FAILED_TO_BOOT = "Monitored service failed to bootstrap after ";
-  public static final String FAILURE_OF_A_LIVE_PROBE_DURING_BOOTSTRAPPING = "Failure of a live probe during bootstrapping";
-  private final List<Probe> monitorProbes;
-  private final List<Probe> dependencyProbes;
-  public final int interval;
-  protected volatile ProbeStatus lastStatus;
-  protected volatile ProbeStatus lastFailingBootstrapProbe;
-  protected volatile Probe currentProbe;
-  private volatile boolean mustExit;
-  private final int bootstrapTimeout;
-  private long bootstrapEndtime;
-
-  private ProbeReportHandler reportHandler;
-  private volatile ProbePhase probePhase = ProbePhase.INIT;
-
-  /**
-   * Create a probe worker
-   * @param monitorProbes list of probes that must boot and then go live -after which
-   * they must stay live.
-   * @param dependencyProbes the list of dependency probes that must all succeed before
-   * any attempt to probe the direct probe list is performed. Once the 
-   * dependency phase has completed, these probes are never checked again.
-   * @param interval probe interval in milliseconds.
-   * @param bootstrapTimeout timeout for bootstrap in milliseconds
-   */
-  public ProbeWorker(List<Probe> monitorProbes, List<Probe> dependencyProbes, int interval, int bootstrapTimeout) {
-    this.monitorProbes = monitorProbes;
-    this.dependencyProbes = dependencyProbes != null ? dependencyProbes : new ArrayList<Probe>(0);
-    this.interval = interval;
-    lastStatus = new ProbeStatus(now(),
-                                 "Initial status");
-    lastStatus.setProbePhase(ProbePhase.INIT);
-    this.bootstrapTimeout = bootstrapTimeout;
-  }
-
-  public void init() throws IOException {
-    for (Probe probe : monitorProbes) {
-      probe.init();
-    }
-    for (Probe probe : dependencyProbes) {
-      probe.init();
-    }
-  }
-
-  public void setReportHandler(ProbeReportHandler reportHandler) {
-    this.reportHandler = reportHandler;
-  }
-
-  public void setMustExit() {
-    this.mustExit = true;
-  }
-
-  public ProbeStatus getLastStatus() {
-    return lastStatus;
-  }
-
-  public synchronized Probe getCurrentProbe() {
-    return currentProbe;
-  }
-
-  public ProbePhase getProbePhase() {
-    return probePhase;
-  }
-
-  /**
-   * Enter the new process state, and report it to the report handler.
-   * This is synchronized just to make sure there isn't more than one
-   * invocation at the same time.
-   * @param status the new process status
-   */
-  private synchronized void enterProbePhase(ProbePhase status) {
-    this.probePhase = status;
-    if (reportHandler != null) {
-      reportHandler.probeProcessStateChange(status);
-    }
-  }
-
-  /**
-   * Report the probe status to the listener -setting the probe phase field
-   * before doing so.
-   * The value is also stored in the {@link #lastStatus} field
-   * @param status the new status
-   */
-  private void reportProbeStatus(ProbeStatus status) {
-    ProbePhase phase = getProbePhase();
-    status.setProbePhase(phase);
-    lastStatus = status;
-    reportHandler.probeResult(phase, status);
-  }
-
-  /**
-   * Ping one probe. Logs the operation at debug level; sets the field <code>currentProbe</code>
-   * to the probe for the duration of the operation -this is used when identifying the
-   * cause of a hung reporting loop
-   * @param probe probe to ping
-   * @param live flag to indicate whether or not the operation is live or bootstrapping
-   * @return the status of the ping
-   * @throws ProbeInterruptedException if the probe has been told to exit
-   */
-  private ProbeStatus ping(Probe probe, boolean live) throws ProbeInterruptedException {
-    if (log.isDebugEnabled()) {
-      log.debug("Executing " + probe);
-    }
-    checkForExitRequest();
-    currentProbe = probe;
-    try {
-      return probe.ping(live);
-    } finally {
-      currentProbe = null;
-    }
-  }
-
-  /**
-   * Check for an exit request -and convert it to an exception if made
-   * @throws ProbeInterruptedException iff {@link #mustExit} is true
-   */
-  private void checkForExitRequest() throws ProbeInterruptedException {
-    if (mustExit) {
-      throw new ProbeInterruptedException();
-    }
-  }
-
-  /**
-   * Check the dependencies. 
-   * The moment a failing test is reached the call returns without
-   * any reporting.
-   *
-   * All successful probes are reported, so as to keep the heartbeats happy.
-   *
-   * @return the status of the last dependency check. If this is a success
-   * them every probe passed.
-   */
-  private ProbeStatus checkDependencyProbes() throws ProbeInterruptedException {
-    ProbeStatus status = null;
-    for (Probe dependency : dependencyProbes) {
-      //ping them, making clear they are not to run any bootstrap logic
-      status = ping(dependency, true);
-
-      if (!status.isSuccess()) {
-        //the first failure means the rest of the list can be skipped
-        break;
-      }
-      reportProbeStatus(status);
-    }
-    //return the last status
-    return status;
-  }
-
-  /**
-   * Run through all the dependency probes and report their outcomes successes (even if they fail)
-   * @return true iff all the probes have succeeded.
-   * @throws ProbeInterruptedException if the process was interrupted.
-   */
-  public boolean checkAndReportDependencyProbes() throws ProbeInterruptedException {
-    ProbeStatus status;
-    status = checkDependencyProbes();
-    if (status != null && !status.isSuccess()) {
-      //during dependency checking, a failure is still reported as a success
-      status.markAsSuccessful();
-      reportProbeStatus(status);
-      //then return without checking anything else
-      return false;
-    }
-    //all dependencies are done.
-    return true;
-  }
-
-  /**
-   * Begin bootstrapping by telling each probe that they have started.
-   * This sets the timeouts up, as well as permits any other set-up actions
-   * to begin.
-   */
-  private void beginBootstrapProbes() {
-    synchronized (this) {
-      bootstrapEndtime = now() + bootstrapTimeout;
-    }
-    for (Probe probe : monitorProbes) {
-      probe.beginBootstrap();
-    }
-  }
-
-  private long now() {
-    return System.currentTimeMillis();
-  }
-
-
-  /**
-   * Check the bootstrap probe list. All successful probes get reported.
-   * The first unsuccessful probe will be returned and not reported (left for policy upstream).
-   * If the failing probe has timed out, that is turned into a {@link ProbeFailedException}
-   * @return the last (unsuccessful) probe, or null if they all succeeded
-   * @throws ProbeInterruptedException interrupts
-   * @throws ProbeFailedException on a boot timeout
-   */
-  private boolean checkBootstrapProbes() throws ProbeInterruptedException, ProbeFailedException {
-    verifyBootstrapHasNotTimedOut();
-
-    boolean probeFailed = false;
-    //now run through all the bootstrap probes
-    for (Probe probe : monitorProbes) {
-      //ping them
-      ProbeStatus status = ping(probe, false);
-      if (!status.isSuccess()) {
-        probeFailed = true;
-        lastFailingBootstrapProbe = status;
-        probe.failureCount++;
-        if (log.isDebugEnabled()) {
-          log.debug("Booting probe failed: " + status);
-        }
-        //at this point check to see if the timeout has occurred -and if so, force in the last probe status.
-
-        //this is a failure but not a timeout
-        //during boot, a failure of a probe that hasn't booted is still reported as a success
-        if (!probe.isBooted()) {
-          //so the success bit is flipped
-          status.markAsSuccessful();
-          reportProbeStatus(status);
-        } else {
-          //the probe had booted but then it switched to failing
-
-          //update the status unedited
-          reportProbeStatus(status);
-          //then fail
-          throw raiseProbeFailure(status, FAILURE_OF_A_LIVE_PROBE_DURING_BOOTSTRAPPING);
-        }
-      } else {
-        //this probe is working
-        if (!probe.isBooted()) {
-          //if it is new, mark it as live
-          if (log.isDebugEnabled()) {
-            log.debug("Booting probe is now live: " + probe);
-          }
-          probe.endBootstrap();
-          //tell the report handler that another probe has booted
-          reportHandler.probeBooted(status);
-        }
-        //push out its status
-        reportProbeStatus(status);
-        probe.successCount++;
-      }
-    }
-    return !probeFailed;
-  }
-
-
-  public int getBootstrapTimeout() {
-    return bootstrapTimeout;
-  }
-
-  /**
-   * This checks that bootstrap operations have not timed out
-   * @throws ProbeFailedException if the bootstrap has failed
-   */
-  public void verifyBootstrapHasNotTimedOut() throws ProbeFailedException {
-    //first step -look for a timeout
-    if (isBootstrapTimeExceeded()) {
-      String text = FAILED_TO_BOOT
-                    + MonitorUtils.millisToHumanTime(bootstrapTimeout);
-
-      ProbeStatus status;
-      if (lastFailingBootstrapProbe != null) {
-        status = lastFailingBootstrapProbe;
-        status.setSuccess(false);
-      } else {
-        status = new ProbeStatus();
-        status.finish(null, false, text, null);
-      }
-
-      throw raiseProbeFailure(status,
-                              text);
-    }
-  }
-
-  /**
-   * predicate that gets current time and checks for its time being exceeded.
-   * @return true iff the current time is > the end time
-   */
-  public synchronized boolean isBootstrapTimeExceeded() {
-    return now() > bootstrapEndtime;
-  }
-
-  /**
-   * run through all the bootstrap probes and see if they are live.
-   * @return true iff all boot probes succeeded
-   * @throws ProbeInterruptedException the probe interruption flags
-   * @throws ProbeFailedException if a probe failed.
-   */
-  public boolean checkAndReportBootstrapProbes() throws ProbeInterruptedException,
-                                                        ProbeFailedException {
-    if (bootstrapTimeout <= 0) {
-      //there is no period of grace for bootstrapping probes, so return true saying
-      //this phase is complete
-      return true;
-    }
-    //now the bootstrapping probes
-    return checkBootstrapProbes();
-  }
-
-
-  /**
-   * run through all the live probes, pinging and reporting them.
-   * A single probe failure is turned into an exception
-   * @throws ProbeFailedException a probe failed
-   * @throws ProbeInterruptedException the probe process was explicitly interrupted
-   */
-  protected void checkAndReportLiveProbes() throws ProbeFailedException, ProbeInterruptedException {
-    ProbeStatus status = null;
-    //go through the live list
-    if (log.isDebugEnabled()) {
-      log.debug("Checking live probes");
-    }
-    for (Probe probe : monitorProbes) {
-      status = ping(probe, true);
-      reportProbeStatus(status);
-      if (!status.isSuccess()) {
-        throw raiseProbeFailure(status, "Failure of probe in \"live\" monitor");
-      }
-      probe.successCount++;
-    }
-    //here all is well, so notify the reporter
-    reportHandler.liveProbeCycleCompleted();
-  }
-
-  /**
-   * Run the set of probes relevant for this phase of the probe lifecycle.
-   * @throws ProbeFailedException a probe failed
-   * @throws ProbeInterruptedException the probe process was explicitly interrupted
-   */
-  protected void executeProbePhases() throws ProbeFailedException, ProbeInterruptedException {
-    switch (probePhase) {
-      case INIT:
-        enterProbePhase(ProbePhase.DEPENDENCY_CHECKING);
-        //fall through straight into the dependency check
-      case DEPENDENCY_CHECKING:
-        if (checkAndReportDependencyProbes()) {
-          enterProbePhase(ProbePhase.BOOTSTRAPPING);
-          beginBootstrapProbes();
-        }
-        break;
-      case BOOTSTRAPPING:
-        if (checkAndReportBootstrapProbes()) {
-          enterProbePhase(ProbePhase.LIVE);
-        }
-        break;
-      case LIVE:
-        checkAndReportLiveProbes();
-        break;
-
-      case TERMINATING:
-      default:
-        //do nothing.
-        break;
-    }
-  }
-
-
-  /**
-   * Raise a probe failure; injecting the phase into the status result first
-   *
-   * @param status ping result
-   * @param text optional text -null or "" means "none"
-   * @return an exception ready to throw
-   */
-  private ProbeFailedException raiseProbeFailure(ProbeStatus status, String text) {
-    status.setProbePhase(probePhase);
-    log.info("Probe failed: " + status);
-    return new ProbeFailedException(text, status);
-  }
-
-  @Override
-  public void run() {
-    int size = monitorProbes.size();
-    log.info("Probe Worker Starting; " + size + " probe" + MonitorUtils.toPlural(size) + ":");
-    enterProbePhase(ProbePhase.DEPENDENCY_CHECKING);
-    for (Probe probe : monitorProbes) {
-      log.info(probe.getName());
-    }
-    while (!mustExit) {
-      try {
-        Thread.sleep(interval);
-        executeProbePhases();
-      } catch (ProbeFailedException e) {
-        //relay to the inner loop handler
-        probeFailed(e);
-      } catch (InterruptedException interrupted) {
-        break;
-      } catch (ProbeInterruptedException e) {
-        //exit raised.
-        //this will be true, just making extra-sure
-        break;
-      }
-    }
-    log.info("Probe Worker Exiting");
-    enterProbePhase(ProbePhase.TERMINATING);
-  }
-
-
-  protected void probeFailed(ProbeFailedException e) {
-    reportHandler.probeFailure(e);
-  }
-
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: YARN-6722. Bumping up pom file hadoop version. Contributed by Panagiotis Garefalakis.

Posted by ji...@apache.org.
YARN-6722. Bumping up pom file hadoop version. Contributed by Panagiotis Garefalakis.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5560b31d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5560b31d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5560b31d

Branch: refs/heads/yarn-native-services
Commit: 5560b31da74de0efcffd302c420082fedec42b97
Parents: 5c0f96c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jun 20 11:21:41 2017 +0900
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml    | 4 ++--
 .../hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml           | 2 +-
 .../hadoop-yarn-applications/hadoop-yarn-slider/pom.xml          | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5560b31d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index bc714db..2b1b858 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-yarn-applications</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-services-api</artifactId>
   <name>Apache Hadoop YARN Services API</name>
-  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <version>3.0.0-alpha4-SNAPSHOT</version>
   <packaging>jar</packaging>
   <description>Hadoop YARN REST APIs for services</description>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5560b31d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 05c4d88..f9dd799 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-yarn-slider</artifactId>
-    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <version>3.0.0-alpha4-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-slider-core</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5560b31d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
index cc42b18..a8b66ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
@@ -19,7 +19,7 @@
     <parent>
         <artifactId>hadoop-yarn-applications</artifactId>
         <groupId>org.apache.hadoop</groupId>
-        <version>3.0.0-alpha3-SNAPSHOT</version>
+        <version>3.0.0-alpha4-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.apache.hadoop</groupId>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/256a1597
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/256a1597
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/256a1597

Branch: refs/heads/yarn-native-services
Commit: 256a1597671a9cd872b8204b8c81ce2c9a8f1cea
Parents: e2e4559
Author: Jian He <ji...@apache.org>
Authored: Thu Apr 20 23:53:04 2017 +0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../api/impl/ApplicationApiService.java         |   11 +-
 .../hadoop-yarn-slider-core/pom.xml             |   31 +-
 .../org/apache/slider/api/InternalKeys.java     |    6 +-
 .../org/apache/slider/api/ResourceKeys.java     |   15 +-
 .../slider/api/SliderClusterProtocol.java       |    4 +-
 .../slider/api/resource/Configuration.java      |   22 +
 .../apache/slider/api/types/RoleStatistics.java |    6 -
 .../org/apache/slider/client/SliderClient.java  |  512 ++-------
 .../apache/slider/client/SliderClientAPI.java   |   62 +-
 .../client/ipc/SliderClusterOperations.java     |   20 +-
 .../apache/slider/common/SliderXmlConfKeys.java |   19 -
 .../AbstractClusterBuildingActionArgs.java      |   53 +
 .../slider/common/params/ActionBuildArgs.java   |   31 +
 .../slider/common/params/ActionCreateArgs.java  |   12 -
 .../slider/common/params/ActionEchoArgs.java    |   33 -
 .../slider/common/params/ActionFlexArgs.java    |   33 +-
 .../common/params/ActionInstallKeytabArgs.java  |   57 -
 .../common/params/ActionInstallPackageArgs.java |   58 -
 .../slider/common/params/ActionKeytabArgs.java  |    7 -
 .../slider/common/params/ActionPackageArgs.java |   81 --
 .../slider/common/params/ActionUpgradeArgs.java |    2 +-
 .../params/AppAndResouceOptionArgsDelegate.java |  111 --
 .../apache/slider/common/params/Arguments.java  |   26 -
 .../apache/slider/common/params/ClientArgs.java |   34 +-
 .../common/params/ComponentArgsDelegate.java    |    2 +-
 .../common/params/OptionArgsDelegate.java       |   66 ++
 .../slider/common/params/SliderActions.java     |    7 -
 .../slider/common/tools/ConfigHelper.java       |    8 -
 .../apache/slider/common/tools/SliderUtils.java |  118 +-
 .../slider/core/launch/AbstractLauncher.java    |    1 -
 .../apache/slider/core/zk/ZKIntegration.java    |    9 +-
 .../apache/slider/providers/ProviderRole.java   |   12 +-
 .../server/appmaster/SliderAppMaster.java       |   63 +-
 .../appmaster/actions/ActionFlexCluster.java    |    4 +-
 .../server/appmaster/metrics/SliderMetrics.java |   12 +-
 .../rpc/SliderClusterProtocolPBImpl.java        |    6 +-
 .../rpc/SliderClusterProtocolProxy.java         |    6 +-
 .../server/appmaster/rpc/SliderIPCService.java  |    6 +-
 .../security/SecurityConfiguration.java         |  235 ++--
 .../slider/server/appmaster/state/AppState.java |  257 +++--
 .../appmaster/state/ContainerOutcome.java       |    4 +-
 .../server/appmaster/state/NodeEntry.java       |    2 +-
 .../server/appmaster/state/RoleStatus.java      |   46 +-
 .../server/appmaster/web/view/IndexBlock.java   |    4 +-
 .../src/main/proto/SliderClusterMessages.proto  |   10 +-
 .../src/main/proto/SliderClusterProtocol.proto  |    2 +-
 .../org/apache/slider/api/TestRPCBinding.java   |   50 +
 .../apache/slider/client/TestClientBadArgs.java |  229 ++++
 .../slider/client/TestClientBasicArgs.java      |   81 ++
 .../slider/client/TestCommonArgParsing.java     |  522 +++++++++
 .../slider/client/TestKeytabCommandOptions.java |  405 +++++++
 .../slider/client/TestSliderClientMethods.java  |  142 +++
 .../slider/client/TestSliderTokensCommand.java  |  124 ++
 .../slider/common/tools/TestClusterNames.java   |  122 ++
 .../slider/common/tools/TestConfigHelper.java   |   57 +
 .../common/tools/TestConfigHelperHDFS.java      |   57 +
 .../common/tools/TestExecutionEnvironment.java  |   67 ++
 .../common/tools/TestMiscSliderUtils.java       |   49 +
 .../slider/common/tools/TestPortScan.java       |  184 +++
 .../common/tools/TestSliderFileSystem.java      |   62 +
 .../common/tools/TestSliderTestUtils.java       |   97 ++
 .../slider/common/tools/TestSliderUtils.java    |   25 -
 .../slider/common/tools/TestWindowsSupport.java |  177 +++
 .../slider/common/tools/TestZKIntegration.java  |  187 +++
 .../slider/core/conf/ExampleConfResources.java  |   58 +
 .../core/conf/TestConfTreeLoadExamples.java     |   64 ++
 .../core/conf/TestConfigurationResolve.java     |  118 ++
 .../slider/other/TestFilesystemPermissions.java |  263 +++++
 .../apache/slider/other/TestLocalDirStatus.java |  166 +++
 .../slider/providers/TestProviderFactory.java   |   54 +
 .../slider/registry/TestConfigSetNaming.java    |   85 ++
 .../slider/registry/TestRegistryPaths.java      |   74 ++
 .../server/appmaster/actions/TestActions.java   |  246 ++++
 .../model/appstate/BaseMockAppStateAATest.java  |   73 ++
 .../TestMockAppStateAAOvercapacity.java         |  112 ++
 .../appstate/TestMockAppStateAAPlacement.java   |  380 +++++++
 .../TestMockAppStateContainerFailure.java       |  387 +++++++
 .../TestMockAppStateDynamicHistory.java         |  212 ++++
 .../appstate/TestMockAppStateDynamicRoles.java  |  243 ++++
 .../TestMockAppStateFlexDynamicRoles.java       |  160 +++
 .../model/appstate/TestMockAppStateFlexing.java |  201 ++++
 .../appstate/TestMockAppStateRMOperations.java  |  382 +++++++
 .../TestMockAppStateRebuildOnAMRestart.java     |  117 ++
 .../appstate/TestMockAppStateRolePlacement.java |  122 ++
 .../appstate/TestMockAppStateRoleRelease.java   |   82 ++
 .../appstate/TestMockAppStateUniqueNames.java   |  111 ++
 .../TestMockContainerResourceAllocations.java   |   89 ++
 .../appstate/TestMockLabelledAAPlacement.java   |  156 +++
 .../TestOutstandingRequestValidation.java       |  110 ++
 .../model/history/TestRoleHistoryAA.java        |  269 +++++
 .../history/TestRoleHistoryContainerEvents.java |  447 ++++++++
 ...TestRoleHistoryFindNodesForNewInstances.java |  177 +++
 .../history/TestRoleHistoryNIComparators.java   |  133 +++
 ...estRoleHistoryOutstandingRequestTracker.java |  385 +++++++
 .../model/history/TestRoleHistoryRW.java        |  371 ++++++
 .../history/TestRoleHistoryRWOrdering.java      |  162 +++
 .../history/TestRoleHistoryRequestTracking.java |  298 +++++
 .../history/TestRoleHistoryUpdateBlacklist.java |  117 ++
 .../server/appmaster/model/mock/Allocator.java  |  123 ++
 .../model/mock/BaseMockAppStateTest.java        |  524 +++++++++
 .../server/appmaster/model/mock/MockAM.java     |   26 +
 .../appmaster/model/mock/MockAppState.java      |   82 ++
 .../model/mock/MockApplicationAttemptId.java    |   61 +
 .../appmaster/model/mock/MockApplicationId.java |   67 ++
 .../model/mock/MockClusterServices.java         |   38 +
 .../appmaster/model/mock/MockContainer.java     |  131 +++
 .../appmaster/model/mock/MockContainerId.java   |  104 ++
 .../appmaster/model/mock/MockFactory.java       |  270 +++++
 .../appmaster/model/mock/MockFileSystem.java    |   32 +
 .../server/appmaster/model/mock/MockNodeId.java |   62 +
 .../appmaster/model/mock/MockPriority.java      |   46 +
 .../model/mock/MockProviderService.java         |  140 +++
 .../model/mock/MockRMOperationHandler.java      |  120 ++
 .../appmaster/model/mock/MockRecordFactory.java |   27 +
 .../model/mock/MockRegistryOperations.java      |   83 ++
 .../appmaster/model/mock/MockResource.java      |   75 ++
 .../appmaster/model/mock/MockRoleHistory.java   |   53 +
 .../server/appmaster/model/mock/MockRoles.java  |   30 +
 .../appmaster/model/mock/MockYarnCluster.java   |  342 ++++++
 .../appmaster/model/mock/MockYarnEngine.java    |  188 ++++
 .../appmaster/model/monkey/TestMockMonkey.java  |  208 ++++
 .../security/TestSecurityConfiguration.java     |  215 ++++
 .../TestServiceTimelinePublisher.java           |   24 +-
 .../web/rest/registry/PathEntryMarshalling.java |   28 +
 .../registry/TestRegistryRestMarshalling.java   |   51 +
 .../web/view/TestClusterSpecificationBlock.java |   78 ++
 .../web/view/TestContainerStatsBlock.java       |  255 +++++
 .../appmaster/web/view/TestIndexBlock.java      |  175 +++
 .../slider/server/management/TestGauges.java    |   55 +
 .../server/servicemonitor/TestPortProbe.java    |    2 +-
 .../apache/slider/test/ContractTestUtils.java   |  901 ---------------
 .../org/apache/slider/tools/TestUtility.java    |  181 ---
 .../apache/slider/utils/ContractTestUtils.java  |  901 +++++++++++++++
 .../org/apache/slider/utils/KeysForTests.java   |   38 +
 .../org/apache/slider/utils/MicroZKCluster.java |   87 ++
 .../java/org/apache/slider/utils/Outcome.java   |   46 +
 .../org/apache/slider/utils/SliderTestBase.java |   60 +
 .../apache/slider/utils/SliderTestUtils.java    | 1065 ++++++++++++++++++
 .../org/apache/slider/utils/TestAssertions.java |   60 +
 .../org/apache/slider/utils/TestUtility.java    |  181 +++
 .../slider/utils/YarnMiniClusterTestBase.java   |  832 ++++++++++++++
 .../slider/utils/YarnZKMiniClusterTestBase.java |  179 +++
 .../src/test/resources/log4j.properties         |   66 ++
 .../slider/common/tools/test/metainfo.txt       |   16 -
 .../slider/common/tools/test/metainfo.xml       |   98 --
 .../slider/common/tools/test/someOtherFile.txt  |   16 -
 .../slider/common/tools/test/someOtherFile.xml  |   17 -
 .../conf/examples/app-override-resolved.json    |   49 +
 .../slider/core/conf/examples/app-override.json |   43 +
 .../slider/core/conf/examples/app-resolved.json |   81 ++
 .../apache/slider/core/conf/examples/app.json   |   54 +
 .../agent/application/metadata/metainfo.xml     |  180 ---
 .../appmaster/web/rest/registry/sample.json     |    9 +
 .../slider/server/avro/history-v01-3-role.json  |    6 +
 .../slider/server/avro/history-v01-6-role.json  |    8 +
 .../slider/server/avro/history_v01b_1_role.json |   38 +
 156 files changed, 17407 insertions(+), 2707 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index b4f6a2e..5a4de0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -50,6 +50,7 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -251,10 +252,12 @@ public class ApplicationApiService {
               .getNumberOfContainers()).build();
     }
     try {
-      long original = SLIDER_CLIENT.flex(appName, component);
-      return Response.ok().entity(
-          "Updating " + componentName + " size from " + original + " to "
-              + component.getNumberOfContainers()).build();
+      Map<String, Long> original = SLIDER_CLIENT.flex(appName, Collections
+          .singletonMap(component.getName(),
+              component.getNumberOfContainers()));
+      return Response.ok().entity("Updating " + componentName + " size from "
+          + original.get(componentName) + " to "
+          + component.getNumberOfContainers()).build();
     } catch (YarnException | IOException e) {
       ApplicationStatus status = new ApplicationStatus();
       status.setDiagnostics(e.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 16a2bb2..05c4d88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -327,16 +327,38 @@
       <artifactId>easymock</artifactId>
       <version>3.1</version>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.objenesis</groupId>
+          <artifactId>objenesis</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
 
     <dependency>
       <groupId>org.powermock</groupId>
       <artifactId>powermock-api-easymock</artifactId>
-      <version>1.5</version>
+      <version>1.6.5</version>
       <scope>test</scope>
     </dependency>
 
     <dependency>
+      <groupId>org.powermock</groupId>
+      <artifactId>powermock-module-junit4</artifactId>
+      <version>1.6.5</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.javassist</groupId>
+          <artifactId>javassist</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.objenesis</groupId>
+          <artifactId>objenesis</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
       <groupId>javax.servlet.jsp</groupId>
       <artifactId>jsp-api</artifactId>
       <scope>runtime</scope>
@@ -359,6 +381,13 @@
         <artifactId>swagger-annotations</artifactId>
         <version>1.5.4</version>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+
   </dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
index fcaaf0e..f690f5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/InternalKeys.java
@@ -126,9 +126,9 @@ public interface InternalKeys {
   String CHAOS_MONKEY_INTERVAL_MINUTES = CHAOS_MONKEY_INTERVAL + ".minutes";
   String CHAOS_MONKEY_INTERVAL_SECONDS = CHAOS_MONKEY_INTERVAL + ".seconds";
   
-  int DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS = 0;
-  int DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS = 0;
-  int DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES = 0;
+  long DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS = 0;
+  long DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS = 0;
+  long DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES = 0;
 
   String CHAOS_MONKEY_DELAY = "internal.chaos.monkey.delay";
   String CHAOS_MONKEY_DELAY_DAYS = CHAOS_MONKEY_DELAY + ".days";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
index 92890be..2f71004 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ResourceKeys.java
@@ -72,6 +72,15 @@ public interface ResourceKeys {
   String YARN_CORES = "yarn.vcores";
 
   /**
+   * If normalization is set to false, then if the resource (memory and/or
+   * vcore) requested by a role is higher than YARN limits, then the resource
+   * request is not normalized. If this causes failures at the YARN level then
+   * applications are expecting that to happen. Default value is true.
+   */
+  String YARN_RESOURCE_NORMALIZATION_ENABLED =
+      "yarn.resource.normalization.enabled";
+
+  /**
    * Number of disks per instance to ask YARN for
    *  {@value}
    */
@@ -140,9 +149,9 @@ public interface ResourceKeys {
 
 
 
-  int DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS = 0;
-  int DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS = 6;
-  int DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES = 0;
+  long DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS = 0;
+  long DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS = 6;
+  long DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES = 0;
 
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
index 7f768b9..448d4ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
@@ -53,8 +53,8 @@ public interface SliderClusterProtocol extends VersionedProtocol {
       YarnException;
 
 
-  Messages.FlexComponentResponseProto flexComponent(
-      Messages.FlexComponentRequestProto request) throws IOException;
+  Messages.FlexComponentsResponseProto flexComponents(
+      Messages.FlexComponentsRequestProto request) throws IOException;
 
   /**
    * Get the current cluster status

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
index c43bd64..7b3b93e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -116,6 +116,28 @@ public class Configuration implements Serializable {
     return Long.parseLong(value);
   }
 
+  public int getPropertyInt(String name, int defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    String value = properties.get(name.trim());
+    if (StringUtils.isEmpty(value)) {
+      return defaultValue;
+    }
+    return Integer.parseInt(value);
+  }
+
+  public boolean getPropertyBool(String name, boolean defaultValue) {
+    if (name == null) {
+      return defaultValue;
+    }
+    String value = properties.get(name.trim());
+    if (StringUtils.isEmpty(value)) {
+      return defaultValue;
+    }
+    return Boolean.parseBoolean(value);
+  }
+
   public String getProperty(String name, String defaultValue) {
     if (name == null) {
       return defaultValue;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RoleStatistics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RoleStatistics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RoleStatistics.java
index c926600..25f4d9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RoleStatistics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RoleStatistics.java
@@ -35,11 +35,8 @@ public class RoleStatistics {
   public long limitsExceeded = 0L;
   public long nodeFailed = 0L;
   public long preempted = 0L;
-  public long releasing = 0L;
   public long requested = 0L;
   public long started = 0L;
-  public long startFailed = 0L;
-  public long totalRequested = 0L;
 
   /**
    * Add another statistics instance
@@ -56,11 +53,8 @@ public class RoleStatistics {
     limitsExceeded += that.limitsExceeded;
     nodeFailed += that.nodeFailed;
     preempted += that.preempted;
-    releasing += that.releasing;
     requested += that.requested;
     started += that.started;
-    startFailed += that.totalRequested;
-    totalRequested += that.totalRequested;
     return this;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 8bceddf..2b0982f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -20,12 +20,9 @@ package org.apache.slider.client;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.io.Files;
-import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
@@ -67,7 +64,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.resource.Application;
@@ -83,22 +79,17 @@ import org.apache.slider.common.params.AbstractActionArgs;
 import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
 import org.apache.slider.common.params.ActionAMSuicideArgs;
 import org.apache.slider.common.params.ActionClientArgs;
-import org.apache.slider.common.params.ActionCreateArgs;
 import org.apache.slider.common.params.ActionDependencyArgs;
 import org.apache.slider.common.params.ActionDiagnosticArgs;
-import org.apache.slider.common.params.ActionEchoArgs;
 import org.apache.slider.common.params.ActionExistsArgs;
 import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
-import org.apache.slider.common.params.ActionInstallKeytabArgs;
-import org.apache.slider.common.params.ActionInstallPackageArgs;
 import org.apache.slider.common.params.ActionKDiagArgs;
 import org.apache.slider.common.params.ActionKeytabArgs;
 import org.apache.slider.common.params.ActionKillContainerArgs;
 import org.apache.slider.common.params.ActionListArgs;
 import org.apache.slider.common.params.ActionLookupArgs;
 import org.apache.slider.common.params.ActionNodesArgs;
-import org.apache.slider.common.params.ActionPackageArgs;
 import org.apache.slider.common.params.ActionRegistryArgs;
 import org.apache.slider.common.params.ActionResolveArgs;
 import org.apache.slider.common.params.ActionResourceArgs;
@@ -122,7 +113,6 @@ import org.apache.slider.core.exceptions.NotFoundException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
 import org.apache.slider.core.exceptions.UsageException;
-import org.apache.slider.core.exceptions.WaitTimeoutException;
 import org.apache.slider.core.launch.ClasspathConstructor;
 import org.apache.slider.core.launch.CredentialUtils;
 import org.apache.slider.core.launch.JavaCommandLineBuilder;
@@ -144,7 +134,6 @@ import org.apache.slider.core.zk.ZKIntegration;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderUtils;
 import org.apache.slider.providers.SliderProviderFactory;
-import org.apache.slider.providers.agent.AgentKeys;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
 import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
@@ -160,11 +149,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
-import java.io.Console;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InterruptedIOException;
 import java.io.OutputStreamWriter;
 import java.io.PrintStream;
@@ -177,8 +164,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -226,7 +211,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   private ClientArgs serviceArgs;
   public ApplicationId applicationId;
-  
+
   private String deployedClusterName;
   /**
    * Cluster operations against the deployed cluster -will be null
@@ -334,23 +319,19 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         exitCode = actionAmSuicide(clusterName,
             serviceArgs.getActionAMSuicideArgs());
         break;
-      
+
+      case ACTION_BUILD:
+        exitCode = actionBuild(getApplicationFromArgs(clusterName,
+            serviceArgs.getActionBuildArgs()));
+        break;
+
       case ACTION_CLIENT:
         exitCode = actionClient(serviceArgs.getActionClientArgs());
         break;
 
       case ACTION_CREATE:
-        ActionCreateArgs args = serviceArgs.getActionCreateArgs();
-        File file = args.getAppDef();
-        Path filePath = new Path(file.getAbsolutePath());
-        log.info("Loading app definition from: " + filePath);
-        Application application =
-            jsonSerDeser.load(FileSystem.getLocal(getConfig()), filePath);
-        if(args.lifetime > 0) {
-          application.setLifetime(args.lifetime);
-        }
-        application.setName(clusterName);
-        actionCreate(application);
+        actionCreate(getApplicationFromArgs(clusterName,
+            serviceArgs.getActionCreateArgs()));
         break;
 
       case ACTION_DEPENDENCY:
@@ -391,14 +372,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
             serviceArgs.getActionKillContainerArgs());
         break;
 
-      case ACTION_INSTALL_KEYTAB:
-        exitCode = actionInstallKeytab(serviceArgs.getActionInstallKeytabArgs());
-        break;
-      
-      case ACTION_INSTALL_PACKAGE:
-        exitCode = actionInstallPkg(serviceArgs.getActionInstallPackageArgs());
-        break;
-
       case ACTION_KEYTAB:
         exitCode = actionKeytab(serviceArgs.getActionKeytabArgs());
         break;
@@ -415,10 +388,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         exitCode = actionNodes("", serviceArgs.getActionNodesArgs());
         break;
 
-      case ACTION_PACKAGE:
-        exitCode = actionPackage(serviceArgs.getActionPackageArgs());
-        break;
-
       case ACTION_REGISTRY:
         exitCode = actionRegistry(serviceArgs.getActionRegistryArgs());
         break;
@@ -605,15 +574,15 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
    * force=true by default.
    */
   @Override
-  public void actionDestroy(String appName)
+  public int actionDestroy(String appName)
       throws YarnException, IOException {
     validateClusterName(appName);
+    verifyNoLiveApp(appName, "Destroy");
     Path appDir = sliderFileSystem.buildClusterDirPath(appName);
     FileSystem fs = sliderFileSystem.getFileSystem();
     if (fs.exists(appDir)) {
       if (fs.delete(appDir, true)) {
-        log.info("Successfully deleted application + " + appName);
-        return;
+        log.info("Successfully deleted application dir for " + appName);
       } else {
         String message =
             "Failed to delete application + " + appName + " at:  " + appDir;
@@ -627,7 +596,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       log.warn(message);
       throw new YarnException(message);
     }
-    //TODO clean registry
+
+    //TODO clean registry?
+    String registryPath = SliderRegistryUtils.registryPathForInstance(
+        appName);
+    try {
+      getRegistryOperations().delete(registryPath, true);
+    } catch (IOException e) {
+      log.warn("Error deleting registry entry {}: {} ", registryPath, e, e);
+    } catch (SliderException e) {
+      log.warn("Error binding to registry {} ", e, e);
+    }
+
+    log.info("Destroyed cluster {}", appName);
+    return EXIT_SUCCESS;
   }
 
   
@@ -648,6 +630,26 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return factory.createClientProvider();
   }
 
+  private Application getApplicationFromArgs(String clusterName,
+      AbstractClusterBuildingActionArgs args) throws IOException {
+    File file = args.getAppDef();
+    Path filePath = new Path(file.getAbsolutePath());
+    log.info("Loading app definition from: " + filePath);
+    Application application =
+        jsonSerDeser.load(FileSystem.getLocal(getConfig()), filePath);
+    if(args.lifetime > 0) {
+      application.setLifetime(args.lifetime);
+    }
+    application.setName(clusterName);
+    return application;
+  }
+
+  public int actionBuild(Application application) throws YarnException,
+      IOException {
+    Path appDir = checkAppNotExistOnHdfs(application);
+    persistApp(appDir, application);
+    return EXIT_SUCCESS;
+  }
 
   public ApplicationId actionCreate(Application application)
       throws IOException, YarnException {
@@ -684,8 +686,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
     submissionContext.setMaxAppAttempts(conf.getInt(KEY_AM_RESTART_LIMIT, 2));
 
-    Map<String, LocalResource> localResources =
-        new HashMap<String, LocalResource>();
+    Map<String, LocalResource> localResources = new HashMap<>();
 
     // copy local slideram-log4j.properties to hdfs and add to localResources
     boolean hasSliderAMLog4j =
@@ -724,10 +725,16 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     amLaunchContext.setLocalResources(localResources);
     addCredentialsIfSecure(conf, amLaunchContext);
     submissionContext.setAMContainerSpec(amLaunchContext);
-    yarnClient.submitApplication(submissionContext);
+    submitApplication(submissionContext);
     return submissionContext.getApplicationId();
   }
 
+  @VisibleForTesting
+  public ApplicationId submitApplication(ApplicationSubmissionContext context)
+      throws IOException, YarnException {
+    return yarnClient.submitApplication(context);
+  }
+
   private void printLocalResources(Map<String, LocalResource> map) {
     log.info("Added LocalResource for localization: ");
     StringBuilder builder = new StringBuilder();
@@ -800,7 +807,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   private Map<String, String> addAMEnv(Configuration conf, Path tempPath)
       throws IOException {
-    Map<String, String> env = new HashMap<String, String>();
+    Map<String, String> env = new HashMap<>();
     ClasspathConstructor classpath =
         buildClasspath(SliderKeys.SUBMITTED_CONF_DIR, "lib",
             sliderFileSystem, getUsingMiniMRCluster());
@@ -929,69 +936,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return 0;
   }
 
-//  protected static void checkForCredentials(Configuration conf,
-//      ConfTree tree, String clusterName) throws IOException {
-//    if (tree.credentials == null || tree.credentials.isEmpty()) {
-//      log.info("No credentials requested");
-//      return;
-//    }
-//
-//    Console console = System.console();
-//    for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
-//      String provider = cred.getKey()
-//          .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
-//          .replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
-//      List<String> aliases = cred.getValue();
-//      if (aliases == null || aliases.isEmpty()) {
-//        continue;
-//      }
-//      Configuration c = new Configuration(conf);
-//      c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
-//      CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0);
-//      Set<String> existingAliases = new HashSet<>(credentialProvider.getAliases());
-//      for (String alias : aliases) {
-//        if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
-//          log.info("Credentials for " + alias + " found in " + provider);
-//        } else {
-//          if (console == null) {
-//            throw new IOException("Unable to input password for " + alias +
-//                " because System.console() is null; provider " + provider +
-//                " must be populated manually");
-//          }
-//          char[] pass = readPassword(alias, console);
-//          credentialProvider.createCredentialEntry(alias, pass);
-//          credentialProvider.flush();
-//          Arrays.fill(pass, ' ');
-//        }
-//      }
-//    }
-//  }
-
-  private static char[] readPassword(String alias, Console console)
-      throws IOException {
-    char[] cred = null;
-
-    boolean noMatch;
-    do {
-      console.printf("%s %s: \n", PASSWORD_PROMPT, alias);
-      char[] newPassword1 = console.readPassword();
-      console.printf("%s %s again: \n", PASSWORD_PROMPT, alias);
-      char[] newPassword2 = console.readPassword();
-      noMatch = !Arrays.equals(newPassword1, newPassword2);
-      if (noMatch) {
-        if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
-        log.info(String.format("Passwords don't match. Try again."));
-      } else {
-        cred = newPassword1;
-      }
-      if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
-    } while (noMatch);
-    if (cred == null)
-      throw new IOException("Could not read credentials for " + alias +
-          " from stdin");
-    return cred;
-  }
-
   @Override
   public int actionKeytab(ActionKeytabArgs keytabInfo)
       throws YarnException, IOException {
@@ -1078,43 +1022,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   @Override
-  public int actionInstallKeytab(ActionInstallKeytabArgs installKeytabInfo)
-      throws YarnException, IOException {
-    log.warn("The 'install-keytab' option has been deprecated.  Please use 'keytab --install'.");
-    return actionKeytab(new ActionKeytabArgs(installKeytabInfo));
-  }
-
-  @Override
-  public int actionInstallPkg(ActionInstallPackageArgs installPkgInfo) throws
-      YarnException,
-      IOException {
-    log.warn("The " + ACTION_INSTALL_PACKAGE
-        + " option has been deprecated. Please use '"
-        + ACTION_PACKAGE + " " + ClientArgs.ARG_INSTALL + "'.");
-    if (StringUtils.isEmpty(installPkgInfo.name)) {
-      throw new BadCommandArgumentsException(
-          E_INVALID_APPLICATION_TYPE_NAME + "\n"
-              + CommonArgs.usage(serviceArgs, ACTION_INSTALL_PACKAGE));
-    }
-    Path srcFile = extractPackagePath(installPkgInfo.packageURI);
-
-    // Do not provide new options to install-package command as it is in
-    // deprecated mode. So version is kept null here. Use package --install.
-    Path pkgPath = sliderFileSystem.buildPackageDirPath(installPkgInfo.name,
-        null);
-    FileSystem sfs = sliderFileSystem.getFileSystem();
-    sfs.mkdirs(pkgPath);
-
-    Path fileInFs = new Path(pkgPath, srcFile.getName());
-    log.info("Installing package {} at {} and overwrite is {}.",
-        srcFile, fileInFs, installPkgInfo.replacePkg);
-    require(!(sfs.exists(fileInFs) && !installPkgInfo.replacePkg),
-          "Package exists at %s. : %s", fileInFs.toUri(), E_USE_REPLACEPKG_TO_OVERWRITE);
-    sfs.copyFromLocalFile(false, installPkgInfo.replacePkg, srcFile, fileInFs);
-    return EXIT_SUCCESS;
-  }
-
-  @Override
   public int actionResource(ActionResourceArgs resourceInfo)
       throws YarnException, IOException {
     if (resourceInfo.help) {
@@ -1287,236 +1194,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return EXIT_SUCCESS;
   }
 
-
-  @Override
-  public int actionPackage(ActionPackageArgs actionPackageInfo)
-      throws YarnException, IOException {
-    initializeOutputStream(actionPackageInfo.out);
-    int exitCode = -1;
-    if (actionPackageInfo.help) {
-      exitCode = actionHelp(ACTION_PACKAGE);
-    }
-    if (actionPackageInfo.install) {
-      exitCode = actionPackageInstall(actionPackageInfo);
-    }
-    if (actionPackageInfo.delete) {
-      exitCode = actionPackageDelete(actionPackageInfo);
-    }
-    if (actionPackageInfo.list) {
-      exitCode = actionPackageList();
-    }
-    if (actionPackageInfo.instances) {
-      exitCode = actionPackageInstances();
-    }
-    finalizeOutputStream(actionPackageInfo.out);
-    if (exitCode != -1) {
-      return exitCode;
-    }
-    throw new BadCommandArgumentsException(
-        "Select valid package operation option");
-  }
-
-  private void initializeOutputStream(String outFile)
-      throws IOException {
-    if (outFile != null) {
-      clientOutputStream = new PrintStream(outFile, "UTF-8");
-    } else {
-      clientOutputStream = System.out;
-    }
-  }
-
-  private void finalizeOutputStream(String outFile) {
-    if (outFile != null && clientOutputStream != null) {
-      clientOutputStream.flush();
-      clientOutputStream.close();
-    }
-    clientOutputStream = System.out;
-  }
-
-  private int actionPackageInstances() throws YarnException, IOException {
-//    Map<String, Path> persistentInstances = sliderFileSystem
-//        .listPersistentInstances();
-//    if (persistentInstances.isEmpty()) {
-//      log.info("No slider cluster specification available");
-//      return EXIT_SUCCESS;
-//    }
-//    String pkgPathValue = sliderFileSystem
-//        .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri()
-//        .getPath();
-//    FileSystem fs = sliderFileSystem.getFileSystem();
-//    Iterator<Map.Entry<String, Path>> instanceItr = persistentInstances
-//        .entrySet().iterator();
-//    log.info("List of applications with its package name and path");
-//    println("%-25s  %15s  %30s  %s", "Cluster Name", "Package Name",
-//        "Package Version", "Application Location");
-    //TODO deal with packages
-//    while(instanceItr.hasNext()) {
-//      Map.Entry<String, Path> entry = instanceItr.next();
-//      String clusterName = entry.getKey();
-//      Path clusterPath = entry.getValue();
-//      AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
-//          clusterName, clusterPath);
-//      Path appDefPath = null;
-//      try {
-//        appDefPath = new Path(
-//            getApplicationDefinitionPath(instanceDefinition
-//                .getAppConfOperations()));
-//      } catch (BadConfigException e) {
-//        // Invalid cluster state, so move on to next. No need to log anything
-//        // as this is just listing of instances.
-//        continue;
-//      }
-//      if (!appDefPath.isUriPathAbsolute()) {
-//        appDefPath = new Path(fs.getHomeDirectory(), appDefPath);
-//      }
-//      String appDefPathStr = appDefPath.toUri().toString();
-//      try {
-//        if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) {
-//          String packageName = appDefPath.getParent().getName();
-//          String packageVersion = StringUtils.EMPTY;
-//          if (instanceDefinition.isVersioned()) {
-//            packageVersion = packageName;
-//            packageName = appDefPath.getParent().getParent().getName();
-//          }
-//          println("%-25s  %15s  %30s  %s", clusterName, packageName,
-//              packageVersion, appDefPathStr);
-//        }
-//      } catch (IOException e) {
-//        log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr);
-//      }
-//    }
-    return EXIT_SUCCESS;
-  }
-
-  private int actionPackageList() throws IOException {
-    Path pkgPath = sliderFileSystem.buildPackageDirPath(StringUtils.EMPTY,
-        StringUtils.EMPTY);
-    log.info("Package install path : {}", pkgPath);
-    FileSystem sfs = sliderFileSystem.getFileSystem();
-    if (!sfs.isDirectory(pkgPath)) {
-      log.info("No package(s) installed");
-      return EXIT_SUCCESS;
-    }
-    FileStatus[] fileStatus = sfs.listStatus(pkgPath);
-    boolean hasPackage = false;
-    StringBuilder sb = new StringBuilder();
-    sb.append("List of installed packages:\n");
-    for (FileStatus fstat : fileStatus) {
-      if (fstat.isDirectory()) {
-        sb.append("\t").append(fstat.getPath().getName());
-        sb.append("\n");
-        hasPackage = true;
-      }
-    }
-    if (hasPackage) {
-      println(sb.toString());
-    } else {
-      log.info("No package(s) installed");
-    }
-    return EXIT_SUCCESS;
-  }
-
-  private void createSummaryMetainfoFile(Path srcFile, Path destFile,
-      boolean overwrite) throws IOException {
-    FileSystem srcFs = srcFile.getFileSystem(getConfig());
-    try (InputStream inputStreamJson = SliderUtils
-        .getApplicationResourceInputStream(srcFs, srcFile, "metainfo.json");
-        InputStream inputStreamXml = SliderUtils
-            .getApplicationResourceInputStream(srcFs, srcFile, "metainfo.xml");) {
-      InputStream inputStream = null;
-      Path summaryFileInFs = null;
-      if (inputStreamJson != null) {
-        inputStream = inputStreamJson;
-        summaryFileInFs = new Path(destFile.getParent(), destFile.getName()
-            + ".metainfo.json");
-        log.info("Found JSON metainfo file in package");
-      } else if (inputStreamXml != null) {
-        inputStream = inputStreamXml;
-        summaryFileInFs = new Path(destFile.getParent(), destFile.getName()
-            + ".metainfo.xml");
-        log.info("Found XML metainfo file in package");
-      }
-      if (inputStream != null) {
-        try (FSDataOutputStream dataOutputStream = sliderFileSystem
-            .getFileSystem().create(summaryFileInFs, overwrite)) {
-          log.info("Creating summary metainfo file");
-          IOUtils.copy(inputStream, dataOutputStream);
-        }
-      }
-    }
-  }
-
-  private int actionPackageInstall(ActionPackageArgs actionPackageArgs)
-      throws YarnException, IOException {
-    requireArgumentSet(Arguments.ARG_NAME, actionPackageArgs.name);
-
-    Path srcFile = extractPackagePath(actionPackageArgs.packageURI);
-
-    Path pkgPath = sliderFileSystem.buildPackageDirPath(actionPackageArgs.name,
-        actionPackageArgs.version);
-    FileSystem fs = sliderFileSystem.getFileSystem();
-    if (!fs.exists(pkgPath)) {
-      fs.mkdirs(pkgPath);
-    }
-
-    Path fileInFs = new Path(pkgPath, srcFile.getName());
-    require(actionPackageArgs.replacePkg || !fs.exists(fileInFs),
-        E_PACKAGE_EXISTS +" at  %s. Use --replacepkg to overwrite.", fileInFs.toUri());
-
-    log.info("Installing package {} to {} (overwrite set to {})", srcFile,
-        fileInFs, actionPackageArgs.replacePkg);
-    fs.copyFromLocalFile(false, actionPackageArgs.replacePkg, srcFile, fileInFs);
-    createSummaryMetainfoFile(srcFile, fileInFs, actionPackageArgs.replacePkg);
-
-    String destPathWithHomeDir = Path
-        .getPathWithoutSchemeAndAuthority(fileInFs).toString();
-    String destHomeDir = Path.getPathWithoutSchemeAndAuthority(
-        fs.getHomeDirectory()).toString();
-    // a somewhat contrived approach to stripping out the home directory and any trailing
-    // separator; designed to work on windows and unix
-    String destPathWithoutHomeDir;
-    if (destPathWithHomeDir.startsWith(destHomeDir)) {
-      destPathWithoutHomeDir = destPathWithHomeDir.substring(destHomeDir.length());
-      if (destPathWithoutHomeDir.startsWith("/") || destPathWithoutHomeDir.startsWith("\\")) {
-        destPathWithoutHomeDir = destPathWithoutHomeDir.substring(1);
-      }
-    } else {
-      destPathWithoutHomeDir = destPathWithHomeDir;
-    }
-    log.info("Set " + AgentKeys.APP_DEF + " in your app config JSON to {}",
-        destPathWithoutHomeDir);
-
-    return EXIT_SUCCESS;
-  }
-
-  private Path extractPackagePath(String packageURI)
-      throws BadCommandArgumentsException {
-    require(isSet(packageURI), E_INVALID_APPLICATION_PACKAGE_LOCATION);
-    File pkgFile = new File(packageURI);
-    require(pkgFile.isFile(),
-        E_UNABLE_TO_READ_SUPPLIED_PACKAGE_FILE + ":  " + pkgFile.getAbsolutePath());
-    return new Path(pkgFile.toURI());
-  }
-
-  private int actionPackageDelete(ActionPackageArgs actionPackageArgs) throws
-      YarnException, IOException {
-    requireArgumentSet(Arguments.ARG_NAME, actionPackageArgs.name);
-
-    Path pkgPath = sliderFileSystem.buildPackageDirPath(actionPackageArgs.name,
-        actionPackageArgs.version);
-    FileSystem fs = sliderFileSystem.getFileSystem();
-    require(fs.exists(pkgPath), E_PACKAGE_DOES_NOT_EXIST +": %s ", pkgPath.toUri());
-    log.info("Deleting package {} at {}.", actionPackageArgs.name, pkgPath);
-
-    if(fs.delete(pkgPath, true)) {
-      log.info("Deleted package {} " + actionPackageArgs.name);
-      return EXIT_SUCCESS;
-    } else {
-      log.warn("Package deletion failed.");
-      return EXIT_NOT_FOUND;
-    }
-  }
-
   @Override
   public int actionUpdate(String clustername,
       AbstractClusterBuildingActionArgs buildInfo) throws
@@ -1594,7 +1271,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     String PLACEHOLDER_PATTERN = "\\$\\{[^{]+\\}";
     Pattern placeholderPattern = Pattern.compile(PLACEHOLDER_PATTERN);
     Matcher placeholderMatcher = placeholderPattern.matcher(env);
-    Map<String, String> placeholderKeyValueMap = new HashMap<String, String>();
+    Map<String, String> placeholderKeyValueMap = new HashMap<>();
     if (placeholderMatcher.find()) {
       String placeholderKey = placeholderMatcher.group();
       String systemKey = placeholderKey
@@ -1865,16 +1542,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   @Override
   @VisibleForTesting
-  public void actionFlex(String appName, ActionFlexArgs args)
+  public int actionFlex(String appName, ActionFlexArgs args)
       throws YarnException, IOException {
-    Component component = new Component();
-    component.setNumberOfContainers(args.getNumberOfContainers());
-    if (StringUtils.isEmpty(args.getComponent())) {
-      component.setName("DEFAULT");
-    } else {
-      component.setName(args.getComponent());
+    Map<String, Long> componentCounts = new HashMap<>(args.getComponentMap()
+        .size());
+    for (Entry<String, String> entry : args.getComponentMap().entrySet()) {
+      long numberOfContainers = Long.parseLong(entry.getValue());
+      componentCounts.put(entry.getKey(), numberOfContainers);
     }
-    flex(appName, component);
+    // throw usage exception if no changes proposed
+    if (componentCounts.size() == 0) {
+      actionHelp(ACTION_FLEX);
+    }
+    flex(appName, componentCounts);
+    return EXIT_SUCCESS;
   }
 
   @Override
@@ -1966,19 +1647,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return EXIT_SUCCESS;
   }
 
-  @Override
-  public String actionEcho(String name, ActionEchoArgs args) throws
-                                                             YarnException,
-                                                             IOException {
-    String message = args.message;
-    if (message == null) {
-      throw new BadCommandArgumentsException("missing message");
-    }
-    SliderClusterOperations clusterOps =
-      new SliderClusterOperations(bondToCluster(name));
-    return clusterOps.echo(message);
-  }
-
   /**
    * Find an instance of an application belonging to the current user.
    * @param appname application name
@@ -2099,7 +1767,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   @Override
-  public void actionStop(String appName, ActionFreezeArgs freezeArgs)
+  public int actionStop(String appName, ActionFreezeArgs freezeArgs)
       throws YarnException, IOException {
     validateClusterName(appName);
     ApplicationReport app = findInstance(appName);
@@ -2112,7 +1780,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         .ordinal()) {
       log.info("Application {} is in a terminated state {}", appName,
           app.getYarnApplicationState());
-      return;
+      return EXIT_SUCCESS;
     }
 
     try {
@@ -2127,6 +1795,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
           + " gracefully, forcefully kill the app.");
       yarnClient.killApplication(app.getApplicationId(), freezeArgs.message);
     }
+    return EXIT_SUCCESS;
   }
 
   @Override
@@ -2143,30 +1812,30 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return 0;
   }
 
-  public long flex(String appName, Component component)
-      throws YarnException, IOException {
+  public Map<String, Long> flex(String appName, Map<String, Long>
+      componentCounts) throws YarnException, IOException {
     validateClusterName(appName);
     Path appDir = sliderFileSystem.buildClusterDirPath(appName);
     Path appJson = new Path(appDir, appName + ".json");
     Application persistedApp =
         jsonSerDeser.load(sliderFileSystem.getFileSystem(), appJson);
-    long original = 0;
-    boolean foundComponent = false;
+    Map<String, Long> original = new HashMap<>(componentCounts.size());
     for (Component persistedComp : persistedApp.getComponents()) {
-      if (persistedComp.getName().equals(component.getName())) {
-        original = persistedComp.getNumberOfContainers();
-        persistedComp.setNumberOfContainers(component.getNumberOfContainers());
-        foundComponent = true;
-        break;
+      String name = persistedComp.getName();
+      if (componentCounts.containsKey(persistedComp.getName())) {
+        original.put(name, persistedComp.getNumberOfContainers());
+        persistedComp.setNumberOfContainers(componentCounts.get(name));
       }
     }
-    if (!foundComponent) {
-      throw new YarnException("Component " + component.getName()
-          + " does not exist in app definition.");
+    if (original.size() < componentCounts.size()) {
+      componentCounts.keySet().removeAll(original.keySet());
+      throw new YarnException("Components " + componentCounts.keySet()
+          + " do not exist in app definition.");
     }
     jsonSerDeser
         .save(sliderFileSystem.getFileSystem(), appJson, persistedApp, true);
-    log.info("Updated app definition file for component " + component);
+    log.info("Updated app definition file for components " + componentCounts
+        .keySet());
 
     ApplicationReport instance = findInstance(appName);
     if (instance != null) {
@@ -2174,11 +1843,14 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       SliderClusterProtocol appMaster = connect(instance);
       SliderClusterOperations clusterOps =
           new SliderClusterOperations(appMaster);
-      clusterOps.flex(component);
-      log.info(
-          "Application name = " + appName + ", Component name = " + component
-              .getName() + ", number of containers updated from " + original
-              + " to " + component.getNumberOfContainers());
+      clusterOps.flex(componentCounts);
+      for (Entry<String, Long> componentCount : componentCounts.entrySet()) {
+        log.info(
+            "Application name = " + appName + ", Component name = " +
+                componentCount.getKey() + ", number of containers updated " +
+                "from " + original.get(componentCount.getKey()) + " to " +
+                componentCount.getValue());
+      }
     } else {
       String message = "Application " + appName + "does not exist in RM. ";
       throw new YarnException(message);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
index 2bb224b..197a564 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
@@ -20,26 +20,18 @@ package org.apache.slider.client;
 
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.service.Service;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.api.types.SliderInstanceDescription;
 import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
 import org.apache.slider.common.params.ActionAMSuicideArgs;
 import org.apache.slider.common.params.ActionClientArgs;
 import org.apache.slider.common.params.ActionDependencyArgs;
-import org.apache.slider.common.params.ActionDestroyArgs;
 import org.apache.slider.common.params.ActionDiagnosticArgs;
-import org.apache.slider.common.params.ActionEchoArgs;
 import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
-import org.apache.slider.common.params.ActionInstallKeytabArgs;
-import org.apache.slider.common.params.ActionInstallPackageArgs;
 import org.apache.slider.common.params.ActionKeytabArgs;
 import org.apache.slider.common.params.ActionNodesArgs;
-import org.apache.slider.common.params.ActionPackageArgs;
 import org.apache.slider.common.params.ActionKillContainerArgs;
 import org.apache.slider.common.params.ActionListArgs;
 import org.apache.slider.common.params.ActionRegistryArgs;
@@ -53,7 +45,6 @@ import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.providers.AbstractClientProvider;
 
 import java.io.IOException;
-import java.util.Map;
 
 /**
  * Interface of those method calls in the slider API that are intended
@@ -63,8 +54,7 @@ import java.util.Map;
  */
 public interface SliderClientAPI extends Service {
 
-  void actionDestroy(String clustername) throws YarnException,
-      IOException;
+  int actionDestroy(String clustername) throws YarnException, IOException;
 
   /**
    * AM to commit an asynchronous suicide
@@ -82,18 +72,6 @@ public interface SliderClientAPI extends Service {
     throws SliderException;
 
   /**
-   * Upload keytab to a designated sub-directory of the user home directory
-   *
-   * @param installKeytabInfo the arguments needed to upload the keytab
-   * @throws YarnException Yarn problems
-   * @throws IOException other problems
-   * @throws BadCommandArgumentsException bad arguments.
-   * @deprecated use #actionKeytab
-   */
-  int actionInstallKeytab(ActionInstallKeytabArgs installKeytabInfo)
-      throws YarnException, IOException;
-
-  /**
    * Manage keytabs leveraged by slider
    *
    * @param keytabInfo the arguments needed to manage the keytab
@@ -105,17 +83,6 @@ public interface SliderClientAPI extends Service {
       throws YarnException, IOException;
 
   /**
-   * Upload application package to user home directory
-   *
-   * @param installPkgInfo the arguments needed to upload the package
-   * @throws YarnException Yarn problems
-   * @throws IOException other problems
-   * @throws BadCommandArgumentsException bad arguments.
-   */
-  int actionInstallPkg(ActionInstallPackageArgs installPkgInfo)
-      throws YarnException, IOException;
-
-  /**
    * Manage file resources leveraged by slider
    *
    * @param resourceInfo the arguments needed to manage the resource
@@ -138,17 +105,6 @@ public interface SliderClientAPI extends Service {
       throws IOException, YarnException;
 
   /**
-   * Managing slider application package
-   *
-   * @param pkgInfo the arguments needed to upload, delete or list the package
-   * @throws YarnException Yarn problems
-   * @throws IOException other problems
-   * @throws BadCommandArgumentsException bad arguments.
-   */
-  int actionPackage(ActionPackageArgs pkgInfo)
-      throws YarnException, IOException;
-
-  /**
    * Update the cluster specification
    *
    * @param clustername cluster name
@@ -179,7 +135,8 @@ public interface SliderClientAPI extends Service {
   int actionList(String clustername, ActionListArgs args) throws IOException, YarnException;
 
 
-  void actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
+  int actionFlex(String name, ActionFlexArgs args) throws YarnException,
+      IOException;
 
   /**
    * Test for a cluster existing probe for a cluster of the given name existing
@@ -200,17 +157,6 @@ public interface SliderClientAPI extends Service {
       throws YarnException, IOException;
 
   /**
-   * Echo operation (not currently wired up to command line)
-   * @param name cluster name
-   * @param args arguments
-   * @return the echoed text
-   * @throws YarnException
-   * @throws IOException
-   */
-  String actionEcho(String name, ActionEchoArgs args)
-      throws YarnException, IOException;
-
-  /**
    * Status operation
    *
    * @param clustername cluster name
@@ -246,7 +192,7 @@ public interface SliderClientAPI extends Service {
    * @param freezeArgs arguments to the stop
    * @return EXIT_SUCCESS if the cluster was not running by the end of the operation
    */
-  void actionStop(String clustername, ActionFreezeArgs freezeArgs)
+  int actionStop(String clustername, ActionFreezeArgs freezeArgs)
       throws YarnException, IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
index 3bb2af6..e89a660 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
@@ -25,7 +25,6 @@ import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.StateValues;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.resource.Application;
-import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.api.types.NodeInformation;
 import org.apache.slider.api.types.NodeInformationList;
@@ -44,6 +43,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
 
 import static org.apache.slider.api.types.RestTypeMarshalling.unmarshall;
 
@@ -283,12 +284,17 @@ public class SliderClusterOperations {
     return state;
   }
 
-  public void flex(Component component) throws IOException{
-    Messages.FlexComponentRequestProto request =
-        Messages.FlexComponentRequestProto.newBuilder()
-            .setNumberOfContainers(component.getNumberOfContainers().intValue())
-            .setName(component.getName()).build();
-        appMaster.flexComponent(request);
+  public void flex(Map<String, Long> componentCounts) throws IOException{
+    Messages.FlexComponentsRequestProto.Builder builder =
+        Messages.FlexComponentsRequestProto.newBuilder();
+    for (Entry<String, Long> componentCount : componentCounts.entrySet()) {
+      Messages.ComponentCountProto componentProto =
+          Messages.ComponentCountProto.newBuilder()
+              .setName(componentCount.getKey())
+              .setNumberOfContainers(componentCount.getValue()).build();
+      builder.addComponents(componentProto);
+    }
+    appMaster.flexComponents(builder.build());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
index b666834..e881edf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
@@ -18,8 +18,6 @@
 
 package org.apache.slider.common;
 
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-
 /**
  * These are the keys that can be added to <code>conf/slider-client.xml</code>.
  */
@@ -105,23 +103,6 @@ public interface SliderXmlConfKeys {
    */
   String DEFAULT_DATA_DIRECTORY_PERMISSIONS = "750";
 
-  /**
-   *
-   * Use {@link RegistryConstants#KEY_REGISTRY_ZK_ROOT}
-   *
-   */
-  @Deprecated
-  String REGISTRY_PATH = "slider.registry.path";
-
-  /**
-   * 
-   * @Deprecated use {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
-   * 
-   */
-  @Deprecated
-  String REGISTRY_ZK_QUORUM = "slider.zookeeper.quorum";
-
-
   String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH =
       "ipc.client.fallback-to-simple-auth-allowed";
   String HADOOP_HTTP_FILTER_INITIALIZERS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
index a2d4e38..c983a63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
@@ -19,6 +19,13 @@
 package org.apache.slider.common.params;
 
 import com.beust.jcommander.Parameter;
+import com.beust.jcommander.ParametersDelegate;
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+
+import java.io.File;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Abstract Action to build things; shares args across build and
@@ -26,6 +33,14 @@ import com.beust.jcommander.Parameter;
  */
 public abstract class AbstractClusterBuildingActionArgs
     extends AbstractActionArgs {
+  @Parameter(names = {ARG_APPDEF},
+      description = "Template application definition file in JSON format.")
+  public File appDef;
+
+  public File getAppDef() {
+    return appDef;
+  }
+
   @Parameter(names = {
       ARG_QUEUE }, description = "Queue to submit the application")
   public String queue;
@@ -33,4 +48,42 @@ public abstract class AbstractClusterBuildingActionArgs
   @Parameter(names = {
       ARG_LIFETIME }, description = "Lifetime of the application from the time of request")
   public long lifetime;
+
+  @ParametersDelegate
+  public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
+
+  @ParametersDelegate
+  public OptionArgsDelegate optionsDelegate =
+      new OptionArgsDelegate();
+
+
+  public Map<String, String> getOptionsMap() throws
+      BadCommandArgumentsException {
+    return optionsDelegate.getOptionsMap();
+  }
+
+  /**
+   * Get the role heap mapping (may be empty, but never null).
+   * @return role heap mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, Map<String, String>> getCompOptionMap() throws
+      BadCommandArgumentsException {
+    return optionsDelegate.getCompOptionMap();
+  }
+
+  @VisibleForTesting
+  public List<String> getComponentTuples() {
+    return componentDelegate.getComponentTuples();
+  }
+
+  /**
+   * Get the role mapping (may be empty, but never null).
+   * @return role mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, String> getComponentMap() throws
+      BadCommandArgumentsException {
+    return componentDelegate.getComponentMap();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
new file mode 100644
index 0000000..57e4b02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.Parameters;
+
+@Parameters(commandNames = {SliderActions.ACTION_BUILD},
+            commandDescription = SliderActions.DESCRIBE_ACTION_BUILD)
+
+public class ActionBuildArgs extends AbstractClusterBuildingActionArgs {
+
+  @Override
+  public String getActionName() {
+    return SliderActions.ACTION_BUILD;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
index c8cac65..4cc1077 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
@@ -18,25 +18,13 @@
 
 package org.apache.slider.common.params;
 
-import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
-import com.beust.jcommander.ParametersDelegate;
-
-import java.io.File;
 
 @Parameters(commandNames = {SliderActions.ACTION_CREATE},
             commandDescription = SliderActions.DESCRIBE_ACTION_CREATE)
 
 public class ActionCreateArgs extends AbstractClusterBuildingActionArgs {
 
-  @Parameter(names = {ARG_APPDEF},
-      description = "Template application definition file in JSON format.")
-  public File appDef;
-
-  public File getAppDef() {
-    return appDef;
-  }
-
   @Override
   public String getActionName() {
     return SliderActions.ACTION_CREATE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionEchoArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionEchoArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionEchoArgs.java
deleted file mode 100644
index d05f10b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionEchoArgs.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-
-public class ActionEchoArgs extends AbstractActionArgs {
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_ECHO;
-  }
-
-  @Parameter(names = {ARG_MESSAGE},
-             description = "message to echo")
-  public String message;
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
index c565484..21cb609 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
@@ -18,31 +18,38 @@
 
 package org.apache.slider.common.params;
 
-import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
+import com.beust.jcommander.ParametersDelegate;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+
+import java.util.List;
+import java.util.Map;
 
 @Parameters(commandNames = {SliderActions.ACTION_FLEX},
             commandDescription = SliderActions.DESCRIBE_ACTION_FLEX)
 
 public class ActionFlexArgs extends AbstractActionArgs {
 
-  @Parameter(names = {ARG_COMPONENT},
-      description = "component name")
-  String componentName;
-
-  @Parameter(names = {ARG_COUNT},
-      description = "number of containers>")
-  long numberOfContainers;
-
   @Override
   public String getActionName() {
     return SliderActions.ACTION_FLEX;
   }
 
-  public String getComponent() {
-    return componentName;
+  @ParametersDelegate
+  public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
+
+  /**
+   * Get the component mapping (may be empty, but never null)
+   * @return mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, String> getComponentMap() throws
+      BadCommandArgumentsException {
+    return componentDelegate.getComponentMap();
   }
-  public long getNumberOfContainers() {
-    return numberOfContainers;
+
+  public List<String> getComponentTuples() {
+    return componentDelegate.getComponentTuples();
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallKeytabArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallKeytabArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallKeytabArgs.java
deleted file mode 100644
index 4cfb889..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallKeytabArgs.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-import com.beust.jcommander.Parameters;
-
-@Parameters(commandNames = {SliderActions.ACTION_INSTALL_KEYTAB},
-            commandDescription = SliderActions.DESCRIBE_ACTION_INSTALL_KEYTAB)
-
-public class ActionInstallKeytabArgs extends AbstractActionArgs {
-  
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_INSTALL_KEYTAB;
-  }
-
-  @Parameter(names = {ARG_KEYTAB},
-             description = "Path to keytab on local disk")
-  public String keytabUri;
-
-  @Parameter(names = {ARG_FOLDER},
-             description = "The name of the folder in which to store the keytab")
-  public String folder;
-
-  @Parameter(names = {ARG_OVERWRITE}, description = "Overwrite existing keytab")
-  public boolean overwrite = false;
-
-  /**
-   * Get the min #of params expected
-   * @return the min number of params in the {@link #parameters} field
-   */
-  public int getMinParams() {
-    return 0;
-  }
-
-  @Override
-  public int getMaxParams() {
-    return 3;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallPackageArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallPackageArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallPackageArgs.java
deleted file mode 100644
index 646e795..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionInstallPackageArgs.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-import com.beust.jcommander.Parameters;
-import com.beust.jcommander.ParametersDelegate;
-
-@Parameters(commandNames = {SliderActions.ACTION_INSTALL_PACKAGE},
-            commandDescription = SliderActions.DESCRIBE_ACTION_INSTALL_PACKAGE)
-
-public class ActionInstallPackageArgs extends AbstractActionArgs {
-  
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_INSTALL_PACKAGE;
-  }
-
-  @Parameter(names = {ARG_PACKAGE},
-             description = "Path to app package on local disk")
-  public String packageURI;
-
-  @Parameter(names = {ARG_NAME},
-             description = "The type of the package")
-  public String name;
-
-  @Parameter(names = {ARG_REPLACE_PKG}, description = "Overwrite existing package")
-  public boolean replacePkg = false;
-
-  /**
-   * Get the min #of params expected
-   * @return the min number of params in the {@link #parameters} field
-   */
-  public int getMinParams() {
-    return 0;
-  }
-
-  @Override
-  public int getMaxParams() {
-    return 1;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionKeytabArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionKeytabArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionKeytabArgs.java
index 32b1d2b..9a708ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionKeytabArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionKeytabArgs.java
@@ -26,13 +26,6 @@ import com.beust.jcommander.Parameters;
 
 public class ActionKeytabArgs extends AbstractActionArgs {
 
-  public ActionKeytabArgs(ActionInstallKeytabArgs installKeytabInfo) {
-    this.install = true;
-    this.overwrite = installKeytabInfo.overwrite;
-    this.keytab = installKeytabInfo.keytabUri;
-    this.folder = installKeytabInfo.folder;
-  }
-
   public ActionKeytabArgs() {
     super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionPackageArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionPackageArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionPackageArgs.java
deleted file mode 100644
index 4833934..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionPackageArgs.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-import com.beust.jcommander.Parameters;
-
-@Parameters(commandNames = {SliderActions.ACTION_PACKAGE},
-            commandDescription = SliderActions.DESCRIBE_ACTION_PACKAGE)
-
-public class ActionPackageArgs extends AbstractActionArgs {
-
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_PACKAGE;
-  }
-
-  @Parameter(names = {ARG_INSTALL},
-      description = "Install package in the sub-folder 'package' of the user's Slider base directory")
-  public boolean install;
-
-  @Parameter(names = {ARG_PKGDELETE},
-      description = "Delete package operation")
-  public boolean delete;
-
-  @Parameter(names = {ARG_PKGLIST},
-      description = "List of package(s) installed")
-  public boolean list;
-
-  @Parameter(names = {ARG_PKGINSTANCES},
-      description = "Lists all application instances referring to package")
-  public boolean instances;
-
-  @Parameter(names = {ARG_PACKAGE},
-             description = "Path to app package on local disk")
-  public String packageURI;
-
-  @Parameter(names = {ARG_NAME},
-             description = "Package name")
-  public String name;
-
-  @Parameter(names = {ARG_VERSION}, description = "Package version")
-  public String version;
-
-  @Parameter(names = {ARG_REPLACE_PKG}, 
-      description = "Overwrite existing package")
-  public boolean replacePkg = false;
-
-  @Parameter(names = {ARG_OUTPUT, ARG_OUTPUT_SHORT},
-      description = "Output file for package data")
-  public String out;
-
-  /**
-   * Get the min #of params expected
-   * @return the min number of params in the {@link #parameters} field
-   */
-  public int getMinParams() {
-    return 0;
-  }
-
-  @Override
-  public int getMaxParams() {
-    return 1;
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
new file mode 100644
index 0000000..746a0ec
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
@@ -0,0 +1,832 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.params.ActionFreezeArgs;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.tools.Duration;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.slider.utils.KeysForTests.*;
+
+/**
+ * Base class for mini cluster tests -creates a field for the
+ * mini yarn cluster.
+ */
+public abstract class YarnMiniClusterTestBase extends SliderTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnMiniClusterTestBase.class);
+
+  /**
+   * Mini YARN cluster only.
+   */
+  public static final int CLUSTER_GO_LIVE_TIME = 3 * 60 * 1000;
+  public static final int CLUSTER_STOP_TIME = 1 * 60 * 1000;
+  public static final int SIGTERM = -15;
+  public static final int SIGKILL = -9;
+  public static final int SIGSTOP = -17;
+  public static final String NO_ARCHIVE_DEFINED = "Archive configuration " +
+      "option not set: ";
+  /**
+   * RAM for the YARN containers: {@value}.
+   */
+  public static final String YRAM = "256";
+  public static final String FIFO_SCHEDULER = "org.apache.hadoop.yarn.server" +
+      ".resourcemanager.scheduler.fifo.FifoScheduler";
+  public static final YarnConfiguration SLIDER_CONFIG =
+      SliderUtils.createConfiguration();
+  private static boolean killSupported;
+
+  static {
+    SLIDER_CONFIG.setInt(SliderXmlConfKeys.KEY_AM_RESTART_LIMIT, 1);
+    SLIDER_CONFIG.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 100);
+    SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+    SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+    SLIDER_CONFIG
+        .setBoolean(SliderXmlConfKeys.KEY_SLIDER_AM_DEPENDENCY_CHECKS_DISABLED,
+            true);
+    SLIDER_CONFIG
+        .setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 1);
+  }
+
+
+  private int thawWaitTime = DEFAULT_THAW_WAIT_TIME_SECONDS * 1000;
+  private int freezeWaitTime = DEFAULT_TEST_FREEZE_WAIT_TIME_SECONDS * 1000;
+  private int sliderTestTimeout = DEFAULT_TEST_TIMEOUT_SECONDS * 1000;
+  private boolean teardownKillall = DEFAULT_TEARDOWN_KILLALL;
+
+  /**
+   * This is set in a system property.
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(
+      getTimeOptionMillis(getTestConfiguration(),
+          KEY_TEST_TIMEOUT,
+          DEFAULT_TEST_TIMEOUT_SECONDS * 1000)
+  );
+  private MiniDFSCluster hdfsCluster;
+  private MiniYARNCluster miniCluster;
+  private boolean switchToImageDeploy = false;
+  private boolean imageIsRemote = false;
+  private URI remoteImageURI;
+  private List<SliderClient> clustersToTeardown = new ArrayList<>();
+  private int clusterCount = 1;
+
+  /**
+   * Client side test: validate the system environment before launch.
+   *
+   * @throws IOException on environment-probe failures
+   * @throws SliderException if the client environment is invalid
+   */
+  @BeforeClass
+  public static void checkClientEnv() throws IOException, SliderException {
+    SliderUtils.validateSliderClientEnvironment(null);
+  }
+
+  /**
+   * Work out if kill is supported.
+   */
+  @BeforeClass
+  public static void checkKillSupport() {
+    killSupported = !Shell.WINDOWS;
+  }
+
+  protected static boolean getKillSupported() {
+    return killSupported;
+  }
+
+  protected MiniYARNCluster getMiniCluster() {
+    return miniCluster;
+  }
+
+  /**
+   * Probe for the disks being healthy in a mini cluster. Only the first
+   * NM is checked.
+   *
+   * @param miniCluster the running mini YARN cluster whose first NM's
+   *                    disk health checker is queried
+   */
+  public static void assertMiniClusterDisksHealthy(
+      MiniYARNCluster miniCluster) {
+    boolean healthy = miniCluster.getNodeManager(
+        0).getNodeHealthChecker().getDiskHandler().areDisksHealthy();
+    assertTrue("Disks on test cluster unhealthy —may be full", healthy);
+  }
+
+  /**
+   * Build and start a mini HDFS cluster rooted under ./target/hdfs/name.
+   * Any previous data directory for the same name is removed first.
+   *
+   * @param name test/cluster name, used as the base-dir suffix
+   * @param conf configuration, patched with the HDFS base dir
+   * @return the started mini DFS cluster
+   */
+  public static MiniDFSCluster buildMiniHDFSCluster(
+      String name,
+      YarnConfiguration conf) throws IOException {
+    assertNativeLibrariesPresent();
+    File hdfsBase = new File("./target/hdfs", name).getAbsoluteFile();
+    // delete through the local file: path so the removal is recursive
+    FileUtil.fullyDelete(hdfsBase);
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBase.getAbsolutePath());
+    return new MiniDFSCluster.Builder(conf).build();
+  }
+
+  /**
+   * Build the default FS URI: the HDFS namenode when a mini DFS cluster
+   * is up, the local filesystem otherwise.
+   *
+   * @param miniDFSCluster mini DFS cluster, may be null
+   * @return a filesystem URI string
+   */
+  public static String buildFsDefaultName(MiniDFSCluster miniDFSCluster) {
+    return miniDFSCluster == null
+        ? "file:///"
+        : String.format("hdfs://localhost:%d/",
+            miniDFSCluster.getNameNodePort());
+  }
+
+  /**
+   * Assert that an operation failed because a cluster is in use.
+   *
+   * @param e exception
+   */
+  public static void assertFailureClusterInUse(SliderException e) {
+    assertExceptionDetails(e,
+        SliderExitCodes.EXIT_APPLICATION_IN_USE,
+        ErrorStrings.E_CLUSTER_RUNNING);
+  }
+
+  /**
+   * Use the supplied cluster name if set, otherwise generate one from
+   * the current test method's name.
+   *
+   * @param clustername proposed name; may be null or empty
+   * @return a non-empty cluster name
+   */
+  protected String buildClustername(String clustername) {
+    return SliderUtils.isSet(clustername)
+        ? clustername
+        : createClusterName();
+  }
+
+  /**
+   * Create the cluster name from the method name and an auto-incrementing
+   * counter.
+   *
+   * @return a cluster name: the bare method name for the first cluster,
+   *         then method-2, method-3, ...
+   */
+  protected String createClusterName() {
+    String base = methodName.getMethodName().toLowerCase(Locale.ENGLISH);
+    // read the counter before incrementing: the old post-increment form
+    // formatted with the already-bumped value, so the sequence skipped "-2"
+    int count = clusterCount++;
+    if (count > 1) {
+      return String.format("%s-%d", base, count);
+    }
+    return base;
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    Configuration testConf = getTestConfiguration();
+    thawWaitTime = getTimeOptionMillis(testConf,
+        KEY_TEST_THAW_WAIT_TIME,
+        thawWaitTime);
+    freezeWaitTime = getTimeOptionMillis(testConf,
+        KEY_TEST_FREEZE_WAIT_TIME,
+        freezeWaitTime);
+    sliderTestTimeout = getTimeOptionMillis(testConf,
+        KEY_TEST_TIMEOUT,
+        sliderTestTimeout);
+    teardownKillall =
+        testConf.getBoolean(KEY_TEST_TEARDOWN_KILLALL,
+            teardownKillall);
+
+  }
+
+  @After
+  public void teardown() {
+    describe("teardown");
+    stopRunningClusters();
+    stopMiniCluster();
+  }
+
+  protected void addToTeardown(SliderClient client) {
+    clustersToTeardown.add(client);
+  }
+
+  protected void addToTeardown(ServiceLauncher<SliderClient> launcher) {
+    if (launcher != null) {
+      SliderClient sliderClient = launcher.getService();
+      if (sliderClient != null) {
+        addToTeardown(sliderClient);
+      }
+    }
+  }
+
+  /**
+   * Kill any java process whose {@code jps -l} listing matches the given
+   * grep pattern.
+   *
+   * @param grepString string to grep for in the jps output
+   * @param signal signal number to deliver (POSIX only; on Windows
+   *               {@code taskkill /f} is used instead)
+   * @return the exit code of the kill pipeline, or -1 when kill is not
+   *         supported on this platform
+   */
+  public int killJavaProcesses(String grepString, int signal)
+      throws IOException, InterruptedException {
+
+    String[] commandString;
+    if (!Shell.WINDOWS) {
+      String killCommand = String.format(
+          "jps -l| grep %s | awk '{print $1}' | xargs kill %d", grepString,
+          signal);
+      // fixed log typo: was "Command command = {}"
+      LOG.info("Kill command = {}", killCommand);
+
+      commandString = new String[]{"bash", "-c", killCommand};
+    } else {
+      // windows
+      if (!killSupported) {
+        return -1;
+      }
+
+      // "jps -l | grep "String" | awk "{print $1}" | xargs -n 1 taskkill /PID"
+      String killCommand = String.format(
+          "jps -l | grep %s | gawk '{print $1}' | xargs -n 1 taskkill /f " +
+              "/PID", grepString);
+      commandString = new String[]{"CMD", "/C", killCommand};
+    }
+
+    Process command = new ProcessBuilder(commandString).start();
+    int exitCode = command.waitFor();
+
+    logStdOutStdErr(command);
+    return exitCode;
+  }
+
+  /**
+   * Kill all processes which match one of the list of grepstrings.
+   *
+   * @param greps
+   * @param signal
+   */
+  public void killJavaProcesses(List<String> greps, int signal)
+      throws IOException, InterruptedException {
+    for (String grep : greps) {
+      killJavaProcesses(grep, signal);
+    }
+  }
+
+  protected YarnConfiguration getConfiguration() {
+    return SLIDER_CONFIG;
+  }
+
+  /**
+   * Stop any running cluster that has been added.
+   */
+  public void stopRunningClusters() {
+    for (SliderClient client : clustersToTeardown) {
+      try {
+        maybeStopCluster(client, "", "Teardown at end of test case", true);
+      } catch (Exception e) {
+        LOG.warn("While stopping cluster " + e, e);
+      }
+    }
+  }
+
+  /**
+   * Stop the mini YARN cluster quietly, then shut down the HDFS mini
+   * cluster if one was started.
+   */
+  public void stopMiniCluster() {
+    Log commonslog = LogFactory.getLog(YarnMiniClusterTestBase.class);
+    ServiceOperations.stopQuietly(commonslog, miniCluster);
+    if (hdfsCluster != null) {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Create and start a minicluster.
+   *
+   * @param name             cluster/test name; if empty one is created from
+   *                         the junit method
+   * @param conf             configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param numLocalDirs     #of local dirs
+   * @param numLogDirs       #of log dirs
+   * @param startHDFS        create an HDFS mini cluster
+   * @return the name of the cluster
+   */
+  protected String createMiniCluster(String name,
+      YarnConfiguration conf,
+      int noOfNodeManagers,
+      int numLocalDirs,
+      int numLogDirs,
+      boolean startHDFS) throws IOException {
+    assertNativeLibrariesPresent();
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
+    conf.set(YarnConfiguration.RM_SCHEDULER, FIFO_SCHEDULER);
+    patchDiskCapacityLimits(conf);
+    SliderUtils.patchConfiguration(conf);
+    name = buildClustername(name);
+    miniCluster = new MiniYARNCluster(
+        name,
+        noOfNodeManagers,
+        numLocalDirs,
+        numLogDirs);
+    miniCluster.init(conf);
+    miniCluster.start();
+    // health check
+    assertMiniClusterDisksHealthy(miniCluster);
+    if (startHDFS) {
+      createMiniHDFSCluster(name, conf);
+    }
+    return name;
+  }
+
+  public void patchDiskCapacityLimits(YarnConfiguration conf) {
+    conf.setFloat(
+        YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
+        99.0f);
+    conf.setInt(SliderXmlConfKeys.DFS_NAMENODE_DU_RESERVED_KEY,
+        2 * 1024 * 1024);
+    conf.setBoolean("yarn.nodemanager.disk-health-checker.enable", false);
+  }
+
+  /**
+   * Create a mini HDFS cluster and save it to the hdfsClusterField.
+   *
+   * @param name
+   * @param conf
+   */
+  public void createMiniHDFSCluster(String name, YarnConfiguration conf)
+      throws IOException {
+    hdfsCluster = buildMiniHDFSCluster(name, conf);
+  }
+
+  /**
+   * Launch the client with the specific args against the MiniMR cluster
+   * launcher i.e. expected to have successfully completed.
+   *
+   * @param conf configuration
+   * @param args arg list
+   * @return the return code
+   */
+  protected ServiceLauncher<SliderClient> launchClientAgainstMiniMR(
+      Configuration conf,
+      List args)
+      throws Throwable {
+    ServiceLauncher<SliderClient> launcher =
+        launchClientNoExitCodeCheck(conf, args);
+    int exited = launcher.getServiceExitCode();
+    if (exited != 0) {
+      throw new SliderException(exited, "Launch failed with exit code " +
+          exited);
+    }
+    return launcher;
+  }
+
+  /**
+   * Launch the client with the specific args against the MiniMR cluster
+   * without any checks for exit codes.
+   *
+   * @param conf configuration
+   * @param args arg list
+   * @return the return code
+   */
+  public ServiceLauncher<SliderClient> launchClientNoExitCodeCheck(
+      Configuration conf,
+      List args) throws Throwable {
+    assertNotNull(miniCluster);
+    return launchClientAgainstRM(getRMAddr(), args, conf);
+  }
+
+  /**
+   * Kill all Slider Services (AM processes matching the AM classname).
+   *
+   * @param signal signal number to send to matching AM processes
+   * @return exit code of the kill command
+   */
+  public int killAM(int signal) throws IOException, InterruptedException {
+    return killJavaProcesses(SliderAppMaster.SERVICE_CLASSNAME_SHORT, signal);
+  }
+
+  public void logStdOutStdErr(Process p) throws IOException {
+    try (BufferedReader br = new BufferedReader(new InputStreamReader(p
+        .getInputStream()))) {
+      String line = br.readLine();
+      while (line != null) {
+        LOG.info(line);
+        line = br.readLine();
+      }
+    }
+    try (BufferedReader br = new BufferedReader(new InputStreamReader(p
+        .getErrorStream()))) {
+      String line = br.readLine();
+      while (line != null) {
+        LOG.error(line);
+        line = br.readLine();
+      }
+    }
+  }
+
+  /**
+   * List any java process.
+   */
+  public void lsJavaProcesses() throws InterruptedException, IOException {
+    Process bash = new ProcessBuilder("jps", "-v").start();
+    bash.waitFor();
+    logStdOutStdErr(bash);
+  }
+
+  public YarnConfiguration getTestConfiguration() {
+    YarnConfiguration conf = getConfiguration();
+    conf.addResource(SLIDER_TEST_XML);
+    return conf;
+  }
+
+  /**
+   * Get the RM address from the running mini cluster's configuration.
+   *
+   * @return the resource manager address; never null or empty
+   */
+  protected String getRMAddr() {
+    assertNotNull(miniCluster);
+    String addr = miniCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
+    // was assertNotNull(addr != null): that boxed a boolean, which is never
+    // null, so the assertion could never fail. Assert on the value itself.
+    assertNotNull(addr);
+    assertNotEquals("", addr);
+    return addr;
+  }
+
+  /**
+   * Return the default filesystem, which is HDFS if the miniDFS cluster is
+   * up, file:// if not.
+   *
+   * @return a filesystem string to pass down
+   */
+  protected String getFsDefaultName() {
+    return buildFsDefaultName(hdfsCluster);
+  }
+
+  /**
+   * Delete with some pauses and backoff; designed to handle slow delete
+   * operation in windows.
+   *
+   * @param sliderFileSystem filesystem wrapper used for non-local deletes
+   * @param path path to delete recursively
+   * @param timeout total time in millis to keep retrying
+   * @throws IOException on filesystem problems
+   * @throws SliderException if the directory still exists at the end
+   */
+  public void rigorousDelete(
+      SliderFileSystem sliderFileSystem,
+      Path path, long timeout) throws IOException, SliderException {
+
+    // compare schemes with equals(): the previous == test compared object
+    // identity, so a "file" scheme built at runtime took the DFS branch
+    if ("file".equals(path.toUri().getScheme())) {
+      File dir = new File(path.toUri().getPath());
+      rigorousDelete(dir, timeout);
+    } else {
+      Duration duration = new Duration(timeout);
+      duration.start();
+      FileSystem dfs = sliderFileSystem.getFileSystem();
+      boolean deleted = false;
+      while (!deleted && !duration.getLimitExceeded()) {
+        dfs.delete(path, true);
+        deleted = !dfs.exists(path);
+        if (!deleted) {
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException e) {
+            LOG.info("ignoring interrupted sleep");
+          }
+        }
+      }
+    }
+    sliderFileSystem.verifyDirectoryNonexistent(path);
+  }
+
+  /**
+   * Delete with some pauses and backoff; designed to handle slow delete
+   * operation in windows.
+   *
+   * @param dir     dir to delete
+   * @param timeout timeout in millis
+   */
+  public void rigorousDelete(File dir, long timeout) throws IOException {
+    Duration duration = new Duration(timeout);
+    duration.start();
+    boolean deleted = false;
+    while (!deleted && !duration.getLimitExceeded()) {
+      FileUtils.deleteQuietly(dir);
+      deleted = !dir.exists();
+      if (!deleted) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          LOG.info("ignoring interrupted sleep");
+        }
+      }
+    }
+    if (!deleted) {
+      // noisy delete raises an IOE
+      FileUtils.deleteDirectory(dir);
+    }
+  }
+
+  /**
+   * Add arguments to launch Slider with.
+   * <p>
+   * Extra arguments are added after standard arguments and before roles.
+   *
+   * @return additional arguments to launch Slider with
+   */
+  protected List<String> getExtraCLIArgs() {
+    return new ArrayList<>();
+  }
+
+  public String getConfDir() throws FileNotFoundException {
+    return getResourceConfDirURI();
+  }
+
+  /**
+   * Get the key for the application.
+   *
+   * @return
+   */
+  public String getApplicationHomeKey() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Get the archive path -which defaults to the local one.
+   *
+   * @return
+   */
+  public String getArchivePath() {
+    return getLocalArchive();
+  }
+
+  /**
+   * Get the local archive -the one defined in the test configuration.
+   *
+   * @return a possibly null/empty string
+   */
+  public final String getLocalArchive() {
+    return getTestConfiguration().getTrimmed(getArchiveKey());
+  }
+
+  /**
+   * Get the key for archives in tests.
+   *
+   * @return
+   */
+  public String getArchiveKey() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Join a key and value into the {@code key=value} form expected after
+   * a {@code -D} option.
+   *
+   * @param key property key
+   * @param val property value
+   * @return the joined definition string
+   */
+  public String define(String key, String val) {
+    return key + "=" + val;
+  }
+
+  public void assumeTestEnabled(boolean flag) {
+    assume(flag, "test disabled");
+  }
+
+  /**
+   * Skip the test (JUnit assumption) unless an archive is configured.
+   * A warning naming the missing option is logged first.
+   */
+  public void assumeArchiveDefined() {
+    String archive = getArchivePath();
+    // was archive != "": a reference comparison that is true for any
+    // runtime-built empty string; test emptiness with isEmpty() instead
+    boolean defined = archive != null && !archive.isEmpty();
+    if (!defined) {
+      LOG.warn(NO_ARCHIVE_DEFINED + getArchiveKey());
+    }
+    assume(defined, NO_ARCHIVE_DEFINED + getArchiveKey());
+  }
+
+  /**
+   * Assume that application home is defined. This does not check that the
+   * path is valid -that is expected to be a failure on tests that require
+   * application home to be set.
+   */
+  public void assumeApplicationHome() {
+    String applicationHome = getApplicationHome();
+    // was applicationHome != "": a reference comparison; use isEmpty()
+    assume(applicationHome != null && !applicationHome.isEmpty(),
+        "Application home dir option not set " + getApplicationHomeKey());
+  }
+
+  public String getApplicationHome() {
+    return getTestConfiguration().getTrimmed(getApplicationHomeKey());
+  }
+
+  public List<String> getImageCommands() {
+    if (switchToImageDeploy) {
+      // its an image that had better be defined
+      assertNotNull(getArchivePath());
+      if (!imageIsRemote) {
+        // its not remote, so assert it exists
+        File f = new File(getArchivePath());
+        assertTrue(f.exists());
+        return Arrays.asList(Arguments.ARG_IMAGE, f.toURI().toString());
+      } else {
+        assertNotNull(remoteImageURI);
+
+        // if it is remote, then its whatever the archivePath property refers to
+        return Arrays.asList(Arguments.ARG_IMAGE, remoteImageURI.toString());
+      }
+    } else {
+      assertNotNull(getApplicationHome());
+      assertTrue(new File(getApplicationHome()).exists());
+      return Arrays.asList(Arguments.ARG_APP_HOME, getApplicationHome());
+    }
+  }
+
+  /**
+   * Get the resource configuration dir in the source tree.
+   *
+   * @return
+   */
+  public File getResourceConfDir() throws FileNotFoundException {
+    File f = new File(getTestConfigurationPath()).getAbsoluteFile();
+    if (!f.exists()) {
+      throw new FileNotFoundException(
+          "Resource configuration directory " + f + " not found");
+    }
+    return f;
+  }
+
+  public String getTestConfigurationPath() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Get a URI string to the resource conf dir that is suitable for passing down
+   * to the AM -and works even when the default FS is hdfs.
+   */
+  public String getResourceConfDirURI() throws FileNotFoundException {
+    return getResourceConfDir().getAbsoluteFile().toURI().toString();
+  }
+
+  /**
+   * Log an application report.
+   *
+   * @param report
+   */
+  public void logReport(ApplicationReport report) {
+    LOG.info(SliderUtils.reportToString(report));
+  }
+
+  /**
+   * Stop the cluster via the stop action -and wait for
+   * {@link #CLUSTER_STOP_TIME} for the cluster to stop. If it doesn't,
+   * a warning is logged and the non-zero exit code is returned.
+   *
+   * @param sliderClient client
+   * @param clustername  cluster
+   * @param message      human-readable reason passed with the stop request
+   * @param force        whether to force the stop
+   * @return the exit code
+   */
+  public int clusterActionFreeze(SliderClient sliderClient, String clustername,
+      String message, boolean force)
+      throws IOException, YarnException {
+    LOG.info("Stopping cluster {}: {}", clustername, message);
+    ActionFreezeArgs freezeArgs = new ActionFreezeArgs();
+    freezeArgs.setWaittime(CLUSTER_STOP_TIME);
+    freezeArgs.message = message;
+    freezeArgs.force = force;
+    int exitCode = sliderClient.actionStop(clustername,
+        freezeArgs);
+    if (exitCode != 0) {
+      LOG.warn("Cluster stop failed with error code {}", exitCode);
+    }
+    return exitCode;
+  }
+
+  /**
+   * Teardown-time cluster termination; will stop the cluster iff the client
+   * is not null.
+   *
+   * @param sliderClient client
+   * @param clustername  name of cluster to teardown
+   * @return
+   */
+  public int maybeStopCluster(
+      SliderClient sliderClient,
+      String clustername,
+      String message,
+      boolean force) throws IOException, YarnException {
+    if (sliderClient != null) {
+      if (SliderUtils.isUnset(clustername)) {
+        clustername = sliderClient.getDeployedClusterName();
+      }
+      //only stop a cluster that exists
+      if (SliderUtils.isSet(clustername)) {
+        return clusterActionFreeze(sliderClient, clustername, message, force);
+      }
+    }
+    return 0;
+  }
+
+  /**
+   * Render a role-to-count map as space-separated {@code role->count}
+   * pairs (including a trailing space, matching the historic format).
+   *
+   * @param roles role name to instance count
+   * @return the formatted string
+   */
+  public String roleMapToString(Map<String, Integer> roles) {
+    StringBuilder text = new StringBuilder();
+    for (Entry<String, Integer> role : roles.entrySet()) {
+      text.append(role.getKey()).append("->")
+          .append(role.getValue()).append(" ");
+    }
+    return text.toString();
+  }
+
+  /**
+   * Turn on test runs against a copy of the archive that is
+   * uploaded to HDFS -this method copies up the
+   * archive then switches the tests into archive mode.
+   */
+  public void enableTestRunAgainstUploadedArchive() throws IOException {
+    Path remotePath = copyLocalArchiveToHDFS(getLocalArchive());
+    // image mode
+    switchToRemoteImageDeploy(remotePath);
+  }
+
+  /**
+   * Switch to deploying a remote image.
+   *
+   * @param remotePath the remote path to use
+   */
+  public void switchToRemoteImageDeploy(Path remotePath) {
+    switchToImageDeploy = true;
+    imageIsRemote = true;
+    remoteImageURI = remotePath.toUri();
+  }
+
+  /**
+   * Copy a local archive to HDFS.
+   *
+   * @param localArchive local archive
+   * @return the path of the uploaded image
+   */
+  public Path copyLocalArchiveToHDFS(String localArchive) throws IOException {
+    assertNotNull(localArchive);
+    File localArchiveFile = new File(localArchive);
+    assertTrue(localArchiveFile.exists());
+    assertNotNull(hdfsCluster);
+    Path remoteUnresolvedArchive = new Path(localArchiveFile.getName());
+    assertTrue(FileUtil.copy(
+        localArchiveFile,
+        hdfsCluster.getFileSystem(),
+        remoteUnresolvedArchive,
+        false,
+        getTestConfiguration()));
+    Path remotePath = hdfsCluster.getFileSystem().resolvePath(
+        remoteUnresolvedArchive);
+    return remotePath;
+  }
+
+  /**
+   * Create a SliderFileSystem instance bonded to the running FS.
+   * The YARN cluster must be up and running already
+   *
+   * @return
+   */
+  public SliderFileSystem createSliderFileSystem()
+      throws URISyntaxException, IOException {
+    FileSystem dfs =
+        FileSystem.get(new URI(getFsDefaultName()), getConfiguration());
+    SliderFileSystem hfs = new SliderFileSystem(dfs, getConfiguration());
+    return hfs;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
new file mode 100644
index 0000000..322b346
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.zk.BlockingZKWatcher;
+import org.apache.slider.core.zk.ZKIntegration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.slider.utils.KeysForTests.USERNAME;
+
+/**
+ * Base class for mini cluster tests that use Zookeeper.
+ */
+public abstract class YarnZKMiniClusterTestBase extends
+    YarnMiniClusterTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnZKMiniClusterTestBase.class);
+
+  private MicroZKCluster microZKCluster;
+
+  /**
+   * Stop the YARN/HDFS mini clusters via the superclass, then close the
+   * micro ZK cluster quietly.
+   */
+  @Override
+  public void stopMiniCluster() {
+    super.stopMiniCluster();
+    IOUtils.closeStream(microZKCluster);
+  }
+
+  /**
+   * Create and initialize a ZK integration instance bound to the given
+   * quorum, optionally blocking until the connection event arrives.
+   *
+   * @param zkQuorum ZK quorum string
+   * @param clusterName cluster whose ZK path is used
+   * @param createClusterPath create the cluster path on connect
+   * @param canBeReadOnly whether a read-only connection is acceptable
+   * @param timeout millis to wait for the connection callback; a value
+   *                of 0 or less skips the wait
+   * @return the (possibly already-connected) ZK integration instance
+   */
+  public ZKIntegration createZKIntegrationInstance(String zkQuorum,
+      String clusterName,
+      boolean createClusterPath,
+      boolean canBeReadOnly,
+      int timeout) throws IOException, InterruptedException {
+    int sessionTimeout = ZKIntegration.SESSION_TIMEOUT;
+
+    BlockingZKWatcher watcher = new BlockingZKWatcher();
+    ZKIntegration zki = ZKIntegration.newInstance(zkQuorum,
+        USERNAME,
+        clusterName,
+        createClusterPath,
+        canBeReadOnly,
+        watcher,
+        sessionTimeout);
+    // fromCache true means the binding was served from cache and the
+    // connection callback may never fire — don't wait in that case
+    boolean fromCache = zki.init();
+    //here the callback may or may not have occurred.
+    //optionally wait for it
+    if (timeout > 0 && !fromCache) {
+      watcher.waitForZKConnection(timeout);
+    }
+    //if we get here, the binding worked
+    LOG.info("Connected: {}", zki);
+    return zki;
+  }
+
+  /**
+   * Wait for a flag to go true.
+   * @param connectedFlag flag set (and presumably notified) by the ZK
+   *                      watcher — TODO confirm the notifier side
+   * @param timeout maximum millis to wait for the notification
+   * @throws InterruptedException if the wait is interrupted
+   */
+  public void waitForZKConnection(AtomicBoolean connectedFlag, int timeout)
+      throws InterruptedException {
+    synchronized (connectedFlag) {
+      if (!connectedFlag.get()) {
+        LOG.info("waiting for ZK event");
+        //wait a bit
+        // NOTE(review): a spurious wakeup or timeout falls through here;
+        // the assertTrue below then fails rather than re-waiting
+        connectedFlag.wait(timeout);
+      }
+    }
+    assertTrue(connectedFlag.get());
+  }
+
+  /**
+   * Create and start a minicluster with ZK.
+   * @param name cluster/test name
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param numLocalDirs #of local dirs
+   * @param numLogDirs #of log dirs
+   * @param startZK create a ZK micro cluster *THIS IS IGNORED*
+   * @param startHDFS create an HDFS mini cluster
+   */
+  protected String createMiniCluster(String name,
+                                   YarnConfiguration conf,
+                                   int noOfNodeManagers,
+                                   int numLocalDirs,
+                                   int numLogDirs,
+                                   boolean startZK,
+                                   boolean startHDFS) throws IOException {
+    if (SliderUtils.isUnset(name)) {
+      name = methodName.getMethodName();
+    }
+    createMicroZKCluster("-" + name, conf);
+    conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true);
+    conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding());
+    //now create the cluster
+    name = super.createMiniCluster(name, conf, noOfNodeManagers,
+        numLocalDirs, numLogDirs, startHDFS);
+
+    return name;
+  }
+
+  /**
+   * Create and start a minicluster.
+   * @param name cluster/test name
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param startZK create a ZK micro cluster
+   */
+  protected String createMiniCluster(String name,
+                                   YarnConfiguration conf,
+                                   int noOfNodeManagers,
+                                   boolean startZK) throws IOException {
+    return createMiniCluster(name, conf, noOfNodeManagers, 1, 1, startZK,
+        false);
+  }
+
+  /**
+   * Create and start a minicluster with the name from the test method.
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param startZK create a ZK micro cluster
+   */
+  protected String createMiniCluster(YarnConfiguration conf,
+      int noOfNodeManagers,
+      boolean startZK) throws IOException {
+    return createMiniCluster("", conf, noOfNodeManagers, 1, 1, startZK,
+        false);
+  }
+
+  public void createMicroZKCluster(String name, Configuration conf) {
+    microZKCluster = new MicroZKCluster(new Configuration(conf));
+    microZKCluster.createCluster(name);
+  }
+
+  public void assertHasZKCluster() {
+    assertNotNull(microZKCluster);
+  }
+
+  /**
+   * ZK quorum binding: the micro cluster's binding when one is running,
+   * otherwise a deliberately unusable localhost:1 placeholder.
+   *
+   * @return a host:port quorum string
+   */
+  public String getZKBinding() {
+    return microZKCluster == null
+        ? "localhost:1"
+        : microZKCluster.getZkBindingString();
+  }
+
+  /**
+   * CLI args include all the ZK bindings needed.
+   *
+   * @return the registry quorum definition arguments
+   */
+  @Override
+  protected List<String> getExtraCLIArgs() {
+    return Arrays.asList(
+      "-D", define(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding())
+    );
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
new file mode 100644
index 0000000..3adbaa4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshhold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess
+
+log4j.logger.org.apache.slider=DEBUG
+log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+log4j.logger.org.apache.hadoop.yarn.registry=DEBUG
+
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.ipc.CallQueueManager=WARN
+
+log4j.logger.org.apache.hadoop.util.Shell=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager=FATAL
+org.apache.hadoop.security.authentication.server.AuthenticationFilter=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+log4j.logger.org.apache.hadoop.hdfs=WARN
+log4j.logger.BlockStateChange=WARN
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL
+
+log4j.logger.org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitorImpl=ERROR
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher=WARN
+log4j.logger.org.apache.hadoop.metrics2=ERROR
+log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN
+log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN
+log4j.logger.org.apache.hadoop.security.token.delegation=WARN
+log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
deleted file mode 100644
index a1d7780..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
deleted file mode 100644
index cb8eab2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <application>
-    <name>STORM</name>
-    <comment>Apache Hadoop Stream processing framework</comment>
-    <version>0.9.1.2.1</version>
-    <components>
-
-      <component>
-        <name>NIMBUS</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/nimbus.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_REST_API</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/rest_api.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>SUPERVISOR</name>
-        <category>SLAVE</category>
-        <commandScript>
-          <script>scripts/supervisor.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_UI_SERVER</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/ui_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>DRPC_SERVER</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/drpc_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-    </components>
-
-    <osSpecifics>
-      <osSpecific>
-        <osType>any</osType>
-        <packages>
-          <package>
-            <type>tarball</type>
-            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-          </package>
-        </packages>
-      </osSpecific>
-    </osSpecifics>
-
-    <configFiles>
-      <configFile>
-        <type>xml</type>
-        <fileName>storm-site.xml</fileName>
-        <dictionaryName>storm-site</dictionaryName>
-      </configFile>
-    </configFiles>
-  </application>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
deleted file mode 100644
index a1d7780..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
deleted file mode 100644
index f86e687..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo></metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
new file mode 100644
index 0000000..e2a21ea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
@@ -0,0 +1,49 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple",
+      "configuration": {
+        "properties": {
+          "g1": "a",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "master",
+      "configuration": {
+        "properties": {
+          "g1": "overridden",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "g2": "b",
+          "timeout": "1000"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
new file mode 100644
index 0000000..552cdef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
@@ -0,0 +1,43 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple"
+    },
+    {
+      "name": "master",
+      "configuration": {
+        "properties": {
+          "name": "m",
+          "g1": "overridden"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "name": "worker",
+          "g1": "overridden-by-worker",
+          "timeout": "1000"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
new file mode 100644
index 0000000..cd1ab6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
@@ -0,0 +1,81 @@
+{
+  "name": "zk-app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "internal.chaos.monkey.interval.seconds": "60",
+      "zookeeper.port": "2181",
+      "zookeeper.path": "/yarnapps_small_cluster",
+      "zookeeper.hosts": "zoo1,zoo2,zoo3",
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.master.startup.retainassign": "true",
+      "site.fs.defaultFS": "hdfs://cluster:8020",
+      "site.fs.default.name": "hdfs://cluster:8020",
+      "site.hbase.master.info.port": "0",
+      "site.hbase.regionserver.info.port": "0"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple",
+      "number_of_containers": 2,
+      "configuration": {
+        "properties": {
+          "g1": "a",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "master",
+      "number_of_containers": 1,
+      "resource": {
+        "cpus": 1,
+        "memory": "512"
+      },
+      "configuration": {
+        "properties": {
+          "zookeeper.port": "2181",
+          "zookeeper.path": "/yarnapps_small_cluster",
+          "zookeeper.hosts": "zoo1,zoo2,zoo3",
+          "env.MALLOC_ARENA_MAX": "4",
+          "site.hbase.master.startup.retainassign": "true",
+          "site.fs.defaultFS": "hdfs://cluster:8020",
+          "site.fs.default.name": "hdfs://cluster:8020",
+          "site.hbase.master.info.port": "0",
+          "site.hbase.regionserver.info.port": "0",
+          "jvm.heapsize": "512M"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "number_of_containers": 5,
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "g2": "b",
+          "zookeeper.port": "2181",
+          "zookeeper.path": "/yarnapps_small_cluster",
+          "zookeeper.hosts": "zoo1,zoo2,zoo3",
+          "env.MALLOC_ARENA_MAX": "4",
+          "site.hbase.master.startup.retainassign": "true",
+          "site.fs.defaultFS": "hdfs://cluster:8020",
+          "site.fs.default.name": "hdfs://cluster:8020",
+          "site.hbase.master.info.port": "0",
+          "site.hbase.regionserver.info.port": "0",
+          "jvm.heapsize": "512M"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
new file mode 100644
index 0000000..90857db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
@@ -0,0 +1,54 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b",
+      "internal.chaos.monkey.interval.seconds": "60",
+      "zookeeper.port": "2181",
+      "zookeeper.path": "/yarnapps_small_cluster",
+      "zookeeper.hosts": "zoo1,zoo2,zoo3",
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.master.startup.retainassign": "true",
+      "site.fs.defaultFS": "hdfs://cluster:8020",
+      "site.fs.default.name": "hdfs://cluster:8020",
+      "site.hbase.master.info.port": "0",
+      "site.hbase.regionserver.info.port": "0"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple"
+    },
+    {
+      "name": "master",
+      "number_of_containers": 1,
+      "configuration": {
+        "properties": {
+          "g1": "overridden",
+          "jvm.heapsize": "512M"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "number_of_containers": 5,
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "jvm.heapsize": "512M"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
deleted file mode 100644
index fbe9299..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <application>
-    <name>STORM</name>
-    <comment>Apache Hadoop Stream processing framework</comment>
-    <version>0.9.1.2.1</version>
-    <exportedConfigs>storm-site</exportedConfigs>
-
-    <exportGroups>
-      <exportGroup>
-        <name>QuickLinks</name>
-        <exports>
-          <export>
-            <name>app.jmx</name>
-            <value>http://${STORM_REST_API_HOST}:${site.global.rest_api_port}/api/cluster/summary</value>
-          </export>
-          <export>
-            <name>app.monitor</name>
-            <value>http://${STORM_UI_SERVER_HOST}:${site.storm-site.ui.port}</value>
-          </export>
-          <export>
-            <name>app.metrics</name>
-            <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
-          </export>
-          <export>
-            <name>ganglia.ui</name>
-            <value>http://${site.global.ganglia_server_host}/ganglia?c=${site.global.ganglia_server_id}</value>
-          </export>
-          <export>
-            <name>nimbus.url</name>
-            <value>http://${NIMBUS_HOST}:${site.storm-site.nimbus.thrift.port}</value>
-          </export>
-        </exports>
-      </exportGroup>
-    </exportGroups>
-
-    <commandOrders>
-      <commandOrder>
-        <command>NIMBUS-START</command>
-        <requires>SUPERVISOR-INSTALLED,STORM_UI_SERVER-INSTALLED,DRPC_SERVER-INSTALLED,STORM_REST_API-INSTALLED
-        </requires>
-      </commandOrder>
-      <commandOrder>
-        <command>SUPERVISOR-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>DRPC_SERVER-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>STORM_REST_API-START</command>
-        <requires>NIMBUS-STARTED,DRPC_SERVER-STARTED,STORM_UI_SERVER-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>STORM_UI_SERVER-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-    </commandOrders>
-
-    <components>
-
-      <component>
-        <name>NIMBUS</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <appExports>QuickLinks-nimbus.url,QuickLinks-ganglia.ui,QuickLinks-app.metrics</appExports>
-        <commandScript>
-          <script>scripts/nimbus.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_REST_API</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <appExports>QuickLinks-app.jmx</appExports>
-        <commandScript>
-          <script>scripts/rest_api.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>SUPERVISOR</name>
-        <category>SLAVE</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <componentExports>
-          <componentExport>
-            <name>log_viewer_port</name>
-            <value>${THIS_HOST}:${site.storm-site.logviewer.port}</value>
-          </componentExport>
-        </componentExports>
-        <commandScript>
-          <script>scripts/supervisor.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_UI_SERVER</name>
-        <category>MASTER</category>
-        <publishConfig>true</publishConfig>
-        <appExports>QuickLinks-app.monitor</appExports>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <commandScript>
-          <script>scripts/ui_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>DRPC_SERVER</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <commandScript>
-          <script>scripts/drpc_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>ANOTHER_COMPONENT</name>
-        <category>MASTER</category>
-        <commands>
-          <command>
-            <exec>start command</exec>
-          </command>
-          <command>
-            <exec>stop command</exec>
-            <name>STOP</name>
-          </command>
-        </commands>
-      </component>
-    </components>
-
-    <osSpecifics>
-      <osSpecific>
-        <osType>any</osType>
-        <packages>
-          <package>
-            <type>tarball</type>
-            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-          </package>
-        </packages>
-      </osSpecific>
-    </osSpecifics>
-
-    <packages>
-      <package>
-        <type>tarball</type>
-        <name>test-tarball-name.tgz</name>
-      </package>
-    </packages>
-  </application>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
new file mode 100644
index 0000000..bc6429c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
@@ -0,0 +1,9 @@
+{
+  "nodes": ["/users/example/services/org-apache-slider/test-registry-rest-resources/components"], "service": {
+  "description": "Slider Application Master",
+  "yarn:id": "application_1411664296263_0001",
+  "yarn:persistence": 1,
+  "external": [],
+  "internal": []
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
new file mode 100644
index 0000000..ceab0a5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
@@ -0,0 +1,6 @@
+{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":3}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":2,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":4}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
new file mode 100644
index 0000000..f1c53d5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
@@ -0,0 +1,8 @@
+{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":6}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":4,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":5,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":6,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":6}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
new file mode 100644
index 0000000..67d644f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
@@ -0,0 +1,38 @@
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryHeader": {
+      "version": 1,
+      "saved": 1450435691617,
+      "savedx": "151b4b44461",
+      "savedate": "18 Dec 2015 10:48:11 GMT",
+      "roles": 2
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryMapping": {
+      "rolemap": {
+        "echo": 1,
+        "slider-appmaster": 0
+      }
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.NodeEntryRecord": {
+      "host": "192.168.56.1",
+      "role": 1,
+      "active": true,
+      "last_used": 0
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryFooter": {
+      "count": 1
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AppDefinitionPersister.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AppDefinitionPersister.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AppDefinitionPersister.java
deleted file mode 100644
index 9eb7d5c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AppDefinitionPersister.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.io.Files;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
-import org.apache.slider.common.params.Arguments;
-import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-import org.apache.slider.core.exceptions.BadConfigException;
-import org.apache.slider.providers.agent.AgentKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Class to prepare and persist app and add-on definitions.
- *
- * In this case, the app definition and add-on definitions are auto-inferred from the user input rather than explicit
- * inclusion of application package in the config.
- *
- * Processing an app definition involves one or more of the following: - modify appConfig - package definition into a
- * temporary folder - upload to HDFS
- *
- * This class keeps track of all the required operations and allows them to be invoked by build operation
- */
-public class AppDefinitionPersister {
-  private static final Logger log =
-      LoggerFactory.getLogger(AppDefinitionPersister.class);
-
-  private final SliderFileSystem sliderFileSystem;
-  private List<AppDefinition> appDefinitions;
-
-  public AppDefinitionPersister(SliderFileSystem sliderFileSystem) {
-    this.sliderFileSystem = sliderFileSystem;
-    appDefinitions = new ArrayList<>();
-  }
-
-
-  /**
-   * Process the application package or folder by copying it to the cluster path
-   *
-   * @param appDefinition details of application package
-   *
-   * @throws BadConfigException
-   * @throws IOException
-   */
-  private void persistDefinitionPackageOrFolder(AppDefinition appDefinition)
-      throws BadConfigException, IOException {
-    if (!appDefinition.appDefPkgOrFolder.canRead()) {
-      throw new BadConfigException("Pkg/Folder cannot be accessed - "
-                                   + appDefinition.appDefPkgOrFolder.getAbsolutePath());
-    }
-
-    File src = appDefinition.appDefPkgOrFolder;
-    String targetName = appDefinition.pkgName;
-    log.debug("Package name: " + targetName);
-    if (appDefinition.appDefPkgOrFolder.isDirectory()) {
-      log.info("Processing app package/folder {} for {}",
-               appDefinition.appDefPkgOrFolder.getAbsolutePath(),
-               appDefinition.pkgName);
-      File tmpDir = Files.createTempDir();
-      File zipFile = new File(tmpDir.getCanonicalPath(), File.separator + appDefinition.pkgName);
-      SliderUtils.zipFolder(appDefinition.appDefPkgOrFolder, zipFile);
-      src = zipFile;
-    }
-
-    sliderFileSystem.getFileSystem().copyFromLocalFile(
-        false,
-        false,
-        new Path(src.toURI()),
-        new Path(appDefinition.targetFolderInFs, targetName));
-  }
-
-  public void persistPackages() throws BadConfigException, IOException {
-    for (AppDefinition appDefinition : appDefinitions) {
-      persistDefinitionPackageOrFolder(appDefinition);
-    }
-  }
-
-  public void processSuppliedDefinitions(String clustername,
-                                         AbstractClusterBuildingActionArgs buildInfo,
-                                         ConfTreeOperations appConf)
-      throws BadConfigException, IOException, BadCommandArgumentsException {
-    // if metainfo is provided add to the app instance
-    if (buildInfo.appMetaInfo != null || buildInfo.appMetaInfoJson != null) {
-      if (buildInfo.appMetaInfo != null && buildInfo.appMetaInfoJson != null) {
-        throw new BadConfigException("Both %s and %s cannot be specified",
-            Arguments.ARG_METAINFO, Arguments.ARG_METAINFO_JSON);
-      }
-
-      // Now we know that only one of either file or JSON is used
-      boolean isFileUsed = buildInfo.appMetaInfo != null ? true : false;
-      String argUsed = isFileUsed ? Arguments.ARG_METAINFO
-          : Arguments.ARG_METAINFO_JSON;
-
-      if (buildInfo.appDef != null) {
-        throw new BadConfigException("Both %s and %s cannot be specified",
-            argUsed, Arguments.ARG_APPDEF);
-      }
-      if (SliderUtils.isSet(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
-        throw new BadConfigException(
-            "%s cannot not be set if %s is specified in the cmd line ",
-            AgentKeys.APP_DEF, argUsed);
-      }
-
-      if (isFileUsed) {
-        if (!buildInfo.appMetaInfo.canRead() || !buildInfo.appMetaInfo.isFile()) {
-          throw new BadConfigException(
-              "Path specified with %s either cannot be read or is not a file",
-              Arguments.ARG_METAINFO);
-        }
-      } else {
-        if (StringUtils.isEmpty(buildInfo.appMetaInfoJson.trim())) {
-          throw new BadConfigException("Empty string specified with %s",
-              Arguments.ARG_METAINFO_JSON);
-        }
-      }
-
-      File tempDir = Files.createTempDir();
-      File pkgSrcDir = new File(tempDir, "default");
-      if (!pkgSrcDir.exists() && !pkgSrcDir.mkdirs()) {
-        throw new IOException("Failed to create directory: " + pkgSrcDir);
-      }
-      File destMetaInfo = new File(pkgSrcDir, "metainfo.json");
-      if (isFileUsed) {
-        if (buildInfo.appMetaInfo.getName().endsWith(".xml")) {
-          Files.copy(buildInfo.appMetaInfo, new File(pkgSrcDir, "metainfo.xml"));
-        } else {
-          Files.copy(buildInfo.appMetaInfo, destMetaInfo);
-        }
-      } else {
-        Files.write(
-            buildInfo.appMetaInfoJson.getBytes(Charset.forName("UTF-8")),
-            destMetaInfo);
-      }
-
-      Path appDirPath = sliderFileSystem.buildAppDefDirPath(clustername);
-      log.info("Using default app def path {}", appDirPath.toString());
-
-      appDefinitions.add(new AppDefinition(appDirPath, pkgSrcDir,
-          SliderKeys.DEFAULT_APP_PKG));
-      Path appDefPath = new Path(appDirPath, SliderKeys.DEFAULT_APP_PKG);
-      appConf.getGlobalOptions().set(AgentKeys.APP_DEF, appDefPath);
-      log.info("Setting app package to {}.", appDefPath);
-    }
-
-    if (buildInfo.appDef != null) {
-      if (SliderUtils.isSet(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
-        throw new BadConfigException("application.def must not be set if --appdef is provided.");
-      }
-
-      if (!buildInfo.appDef.exists()) {
-        throw new BadConfigException("--appdef is not a valid path.");
-      }
-
-      Path appDirPath = sliderFileSystem.buildAppDefDirPath(clustername);
-      appDefinitions.add(new AppDefinition(appDirPath, buildInfo.appDef, SliderKeys.DEFAULT_APP_PKG));
-      Path appDefPath = new Path(appDirPath, SliderKeys.DEFAULT_APP_PKG);
-      appConf.getGlobalOptions().set(AgentKeys.APP_DEF, appDefPath);
-      log.info("Setting app package to {}.", appDefPath);
-    }
-
-    if (buildInfo.addonDelegate.getAddonMap().size() > 0) {
-      if (SliderUtils.isUnset(appConf.getGlobalOptions().get(AgentKeys.APP_DEF))) {
-        throw new BadConfigException("addon package can only be specified if main app package is specified.");
-      }
-
-      List<String> addons = new ArrayList<String>();
-      Map<String, String> addonMap = buildInfo.addonDelegate.getAddonMap();
-      for (Map.Entry<String, String > entry : addonMap.entrySet()) {
-        String key = entry.getKey();
-        String value = entry.getValue();
-        if (SliderUtils.isUnset(value)) {
-          throw new BadConfigException("Invalid path for addon package " + key);
-        }
-        File defPath = new File(value);
-        if (!defPath.exists()) {
-          throw new BadConfigException("addon folder or package path is not valid.");
-        }
-
-        Path addonPath = sliderFileSystem.buildAddonDirPath(clustername, key);
-        String addonPkgName = "addon_" + key + ".zip";
-
-        log.debug(
-            "addonMap.get(key): {} addonPath: {} defPath: {} addonPkgName: {}",
-            addonMap.get(key), addonPath, defPath, addonPkgName);
-
-        appDefinitions.add(new AppDefinition(addonPath, defPath, addonPkgName));
-        String addOnKey = AgentKeys.ADDON_PREFIX + key;
-        Path addonPkgPath = new Path(addonPath, addonPkgName);
-        log.info("Setting addon package {} to {}.", addOnKey, addonPkgPath);
-        appConf.getGlobalOptions().set(addOnKey, addonPkgPath);
-        addons.add(addOnKey);
-      }
-
-      String existingList = appConf.getGlobalOptions().get(AgentKeys.ADDONS);
-      if (SliderUtils.isUnset(existingList)) {
-        existingList = "";
-      }
-      appConf.getGlobalOptions().set(AgentKeys.ADDONS, existingList + StringUtils.join(addons, ","));
-    }
-  }
-
-
-  @VisibleForTesting
-  public List<AppDefinitionPersister.AppDefinition> getAppDefinitions() {
-    return appDefinitions;
-  }
-
-  // Helper class to hold details for the app and addon packages
-  static class AppDefinition {
-    // The target folder where the package will be stored
-    public Path targetFolderInFs;
-    // The on disk location of the app def package or folder
-    public File appDefPkgOrFolder;
-    // Package name
-    public String pkgName;
-
-    public AppDefinition(Path targetFolderInFs, File appDefPkgOrFolder, String pkgName) {
-      this.targetFolderInFs = targetFolderInFs;
-      this.appDefPkgOrFolder = appDefPkgOrFolder;
-      this.pkgName = pkgName;
-    }
-
-    @Override
-    public String toString() {
-      return new StringBuilder().append("targetFolderInFs").append(" : ").append(targetFolderInFs.toString())
-          .append(", ")
-          .append("appDefPkgOrFolder").append(" : ").append(appDefPkgOrFolder.toString())
-          .append(", ")
-          .append("pkgName").append(" : ").append(pkgName).toString();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/JsonSerDeser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/JsonSerDeser.java
index 4f60c06..8fe2549 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/JsonSerDeser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/JsonSerDeser.java
@@ -29,6 +29,7 @@ import org.codehaus.jackson.JsonParseException;
 import org.codehaus.jackson.map.DeserializationConfig;
 import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
 import org.codehaus.jackson.map.SerializationConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,6 +66,11 @@ public class JsonSerDeser<T> {
     mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
   }
 
+  public JsonSerDeser(Class<T> classType, PropertyNamingStrategy namingStrategy) {
+    this(classType);
+    mapper.setPropertyNamingStrategy(namingStrategy);
+  }
+
   /**
    * Convert from JSON
    * @param json input

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
index 01444fd..42e103a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
@@ -103,19 +103,6 @@ public abstract class AbstractClientProvider extends Configured {
 
 
   /**
-   * Any provider-side alteration of a configuration can take place here.
-   * @param aggregateConf config to patch
-   * @throws IOException IO problems
-   * @throws SliderException Slider-specific issues
-   */
-  public void prepareInstanceConfiguration(AggregateConf aggregateConf) throws
-      SliderException,
-                                                                    IOException {
-    //default: do nothing
-  }
-
-
-  /**
    * Prepare the AM settings for launch
    * @param fileSystem filesystem
    * @param serviceConf configuration of the client
@@ -234,7 +221,7 @@ public abstract class AbstractClientProvider extends Configured {
    * @param appDescription brief description of the application
    * @return
    */
-  public final Set<String> createApplicationTags(String appName,
+  public static final Set<String> createApplicationTags(String appName,
       String appVersion, String appDescription) {
     Set<String> tags = new HashSet<>();
     tags.add(SliderUtils.createNameTag(appName));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
deleted file mode 100644
index 41b26e9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.providers;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
-import org.apache.hadoop.registry.client.exceptions.InvalidRecordException;
-import org.apache.hadoop.registry.client.types.AddressTypes;
-import org.apache.hadoop.registry.client.types.Endpoint;
-import org.apache.hadoop.registry.client.types.ServiceRecord;
-import org.apache.hadoop.service.Service;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.client.api.AMRMClient;
-import org.apache.slider.api.ClusterDescription;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.tools.ConfigHelper;
-import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.main.ExitCodeProvider;
-import org.apache.slider.server.appmaster.actions.QueueAccess;
-import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
-import org.apache.slider.server.appmaster.state.ContainerReleaseSelector;
-import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.services.workflow.ForkedProcessService;
-import org.apache.slider.server.services.workflow.ServiceParent;
-import org.apache.slider.server.services.workflow.WorkflowSequenceService;
-import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-/**
- * The base class for provider services. It lets the implementations
- * add sequences of operations, and propagates service failures
- * upstream
- */
-public abstract class AbstractProviderService
-    extends WorkflowSequenceService
-    implements
-    ProviderCore,
-    SliderKeys,
-    ProviderService {
-  private static final Logger log =
-    LoggerFactory.getLogger(AbstractProviderService.class);
-  protected StateAccessForProviders amState;
-  protected URL amWebAPI;
-  protected YarnRegistryViewForProviders yarnRegistry;
-  protected QueueAccess queueAccess;
-
-  protected AbstractProviderService(String name) {
-    super(name);
-    setStopIfNoChildServicesAtStartup(false);
-  }
-
-  @Override
-  public Configuration getConf() {
-    return getConfig();
-  }
-
-  public StateAccessForProviders getAmState() {
-    return amState;
-  }
-
-  public QueueAccess getQueueAccess() {
-    return queueAccess;
-  }
-
-  public void setAmState(StateAccessForProviders amState) {
-    this.amState = amState;
-  }
-
-  @Override
-  public String getHumanName() {
-    return getName().toLowerCase(Locale.ENGLISH);
-  }
-  
-  @Override
-  public void bind(StateAccessForProviders stateAccessor,
-      QueueAccess queueAccess,
-      List<Container> liveContainers) {
-    this.amState = stateAccessor;
-    this.queueAccess = queueAccess;
-  }
-
-  @Override
-  public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
-    this.yarnRegistry = yarnRegistry;
-  }
-
-  public YarnRegistryViewForProviders getYarnRegistry() {
-    return yarnRegistry;
-  }
-
-  @Override
-  public void notifyContainerCompleted(ContainerId containerId) {
-  }
-
-  /**
-   * Load default Configuration
-   * @param confDir configuration directory
-   * @return configuration
-   * @throws BadCommandArgumentsException
-   * @throws IOException
-   */
-  @Override
-  public Configuration loadProviderConfigurationInformation(File confDir)
-      throws BadCommandArgumentsException, IOException {
-    return new Configuration(false);
-  }
-
-  /**
-   * Load a specific XML configuration file for the provider config
-   * @param confDir configuration directory
-   * @param siteXMLFilename provider-specific filename
-   * @return a configuration to be included in status
-   * @throws BadCommandArgumentsException argument problems
-   * @throws IOException IO problems
-   */
-  protected Configuration loadProviderConfigurationInformation(File confDir,
-                                                               String siteXMLFilename)
-    throws BadCommandArgumentsException, IOException {
-    Configuration siteConf;
-    File siteXML = new File(confDir, siteXMLFilename);
-    if (!siteXML.exists()) {
-      throw new BadCommandArgumentsException(
-        "Configuration directory %s doesn't contain %s - listing is %s",
-        confDir, siteXMLFilename, SliderUtils.listDir(confDir));
-    }
-
-    //now read it in
-    siteConf = ConfigHelper.loadConfFromFile(siteXML);
-    log.info("{} file is at {}", siteXMLFilename, siteXML);
-    log.info(ConfigHelper.dumpConfigToString(siteConf));
-    return siteConf;
-  }
-
-  /**
-   * No-op implementation of this method.
-   */
-  @Override
-  public void initializeApplicationConfiguration(
-      AggregateConf instanceDefinition, SliderFileSystem fileSystem,
-      String roleGroup)
-      throws IOException, SliderException {
-  }
-
-  /**
-   * No-op implementation of this method.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public void validateApplicationConfiguration(AggregateConf instance,
-                                               File confDir,
-                                               boolean secure)
-      throws IOException, SliderException {
-
-  }
-
-  /**
-   * Scan through the roles and see if it is supported.
-   * @param role role to look for
-   * @return true if the role is known about -and therefore
-   * that a launcher thread can be deployed to launch it
-   */
-  @Override
-  public boolean isSupportedRole(String role) {
-    Collection<ProviderRole> roles = getRoles();
-    for (ProviderRole providedRole : roles) {
-      if (providedRole.name.equals(role)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * override point to allow a process to start executing in this container
-   * @param instanceDefinition cluster description
-   * @param confDir configuration directory
-   * @param env environment
-   * @param execInProgress the callback for the exec events
-   * @return false
-   * @throws IOException
-   * @throws SliderException
-   */
-  @Override
-  public boolean exec(AggregateConf instanceDefinition,
-      File confDir,
-      Map<String, String> env,
-      ProviderCompleted execInProgress) throws IOException, SliderException {
-    return false;
-  }
-
-  @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-  @Override // ExitCodeProvider
-  public int getExitCode() {
-    Throwable cause = getFailureCause();
-    if (cause != null) {
-      //failed for some reason
-      if (cause instanceof ExitCodeProvider) {
-        return ((ExitCodeProvider) cause).getExitCode();
-      }
-    }
-    ForkedProcessService lastProc = latestProcess();
-    if (lastProc == null || !lastProc.isProcessTerminated()) {
-      return 0;
-    } else {
-      return lastProc.getExitCode();
-    }
-  }
-
-  /**
-   * Return the latest forked process service that ran
-   * @return the forkes service
-   */
-  protected ForkedProcessService latestProcess() {
-    Service current = getActiveService();
-    Service prev = getPreviousService();
-
-    Service latest = current != null ? current : prev;
-    if (latest instanceof ForkedProcessService) {
-      return (ForkedProcessService) latest;
-    } else {
-      //its a composite object, so look inside it for a process
-      if (latest instanceof ServiceParent) {
-        return getFPSFromParentService((ServiceParent) latest);
-      } else {
-        //no match
-        return null;
-      }
-    }
-  }
-
-
-  /**
-   * Given a parent service, find the one that is a forked process
-   * @param serviceParent parent
-   * @return the forked process service or null if there is none
-   */
-  protected ForkedProcessService getFPSFromParentService(ServiceParent serviceParent) {
-    List<Service> services = serviceParent.getServices();
-    for (Service s : services) {
-      if (s instanceof ForkedProcessService) {
-        return (ForkedProcessService) s;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * if we are already running, start this service
-   */
-  protected void maybeStartCommandSequence() {
-    if (isInState(STATE.STARTED)) {
-      startNextService();
-    }
-  }
-
-  /**
-   * Create a new forked process service with the given
-   * name, environment and command list -then add it as a child
-   * for execution in the sequence.
-   *
-   * @param name command name
-   * @param env environment
-   * @param commands command line
-   * @throws IOException
-   * @throws SliderException
-   */
-  protected ForkedProcessService queueCommand(String name,
-                              Map<String, String> env,
-                              List<String> commands) throws
-                                                     IOException,
-      SliderException {
-    ForkedProcessService process = buildProcess(name, env, commands);
-    //register the service for lifecycle management; when this service
-    //is terminated, so is the master process
-    addService(process);
-    return process;
-  }
-
-  public ForkedProcessService buildProcess(String name,
-                                           Map<String, String> env,
-                                           List<String> commands) throws
-                                                                  IOException,
-      SliderException {
-    ForkedProcessService process;
-    process = new ForkedProcessService(name);
-    process.init(getConfig());
-    process.build(env, commands);
-    return process;
-  }
-
-  /*
-   * Build the provider status, can be empty
-   * @return the provider status - map of entries to add to the info section
-   */
-  @Override
-  public Map<String, String> buildProviderStatus() {
-    return new HashMap<String, String>();
-  }
-
-  /*
-  Build the monitor details. The base implementation includes all the external URL endpoints
-  in the external view
-   */
-  @Override
-  public Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterDesc) {
-    Map<String, MonitorDetail> details = new LinkedHashMap<String, MonitorDetail>();
-
-    // add in all the endpoints
-    buildEndpointDetails(details);
-
-    return details;
-  }
-
-  @Override
-  public void buildEndpointDetails(Map<String, MonitorDetail> details) {
-    ServiceRecord self = yarnRegistry.getSelfRegistration();
-
-    List<Endpoint> externals = self.external;
-    for (Endpoint endpoint : externals) {
-      String addressType = endpoint.addressType;
-      if (AddressTypes.ADDRESS_URI.equals(addressType)) {
-        try {
-          List<URL> urls = RegistryTypeUtils.retrieveAddressURLs(endpoint);
-          if (!urls.isEmpty()) {
-            details.put(endpoint.api, new MonitorDetail(urls.get(0).toString(), true));
-          }
-        } catch (InvalidRecordException  | MalformedURLException ignored) {
-          // Ignored
-        }
-
-      }
-
-    }
-  }
-
-  @Override
-  public void applyInitialRegistryDefinitions(URL amWebURI,
-      ServiceRecord serviceRecord)
-    throws IOException {
-      this.amWebAPI = amWebURI;
-  }
-
-  /**
-   * {@inheritDoc}
-   * 
-   * 
-   * @return The base implementation returns the most recent containers first.
-   */
-  @Override
-  public ContainerReleaseSelector createContainerReleaseSelector() {
-    return new MostRecentContainerReleaseSelector();
-  }
-
-  @Override
-  public void releaseAssignedContainer(ContainerId containerId) {
-    // no-op
-  }
-
-  @Override
-  public void addContainerRequest(AMRMClient.ContainerRequest req) {
-    // no-op
-  }
-
-  @Override
-  public void cancelSingleRequest(AMRMClient.ContainerRequest request) {
-    // no-op
-  }
-
-  @Override
-  public int cancelContainerRequests(Priority priority1,
-      Priority priority2,
-      int count) {
-    return 0;
-  }
-
-  @Override
-  public void updateBlacklist(List<String> blacklistAdditions,
-      List<String> blacklistRemovals) {
-    // no-op
-  }
-
-  @Override
-  public void execute(List<AbstractRMOperation> operations) {
-    for (AbstractRMOperation operation : operations) {
-      operation.execute(this);
-    }
-  }
-  /**
-   * No-op implementation of this method.
-   */
-  @Override
-  public void rebuildContainerDetails(List<Container> liveContainers,
-      String applicationId, Map<Integer, ProviderRole> providerRoles) {
-  }
-
-  @Override
-  public boolean processContainerStatus(ContainerId containerId,
-      ContainerStatus status) {
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
index 9767430..b07fc29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
@@ -19,9 +19,6 @@
 package org.apache.slider.providers;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.exceptions.SliderException;
 
 import java.util.List;
 public interface ProviderCore {
@@ -31,13 +28,4 @@ public interface ProviderCore {
   List<ProviderRole> getRoles();
 
   Configuration getConf();
-
-  /**
-   * Verify that an instance definition is considered valid by the provider
-   * @param instanceDefinition instance definition
-   * @throws SliderException if the configuration is not valid
-   */
-  void validateInstanceDefinition(AggregateConf instanceDefinition) throws
-      SliderException;
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index 761ac0f..e0299e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -18,7 +18,7 @@
 
 package org.apache.slider.providers;
 
-import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Component;
 
 /**
  * Provider role and key for use in app requests.
@@ -34,16 +34,8 @@ public final class ProviderRole {
   public int nodeFailureThreshold;
   public final long placementTimeoutSeconds;
   public final String labelExpression;
+  public final Component component;
 
-  public ProviderRole(String name, int id) {
-    this(name,
-        name,
-        id,
-        PlacementPolicy.DEFAULT,
-        ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
-        ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
-        ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
-  }
 
   /**
    * Create a provider role
@@ -67,7 +59,7 @@ public final class ProviderRole {
         policy,
         nodeFailureThreshold,
         placementTimeoutSeconds,
-        labelExpression);
+        labelExpression, null);
   }
 
   /**
@@ -87,7 +79,8 @@ public final class ProviderRole {
       int policy,
       int nodeFailureThreshold,
       long placementTimeoutSeconds,
-      String labelExpression) {
+      String labelExpression,
+      Component component) {
     this.name = name;
     if (group == null) {
       this.group = name;
@@ -99,6 +92,8 @@ public final class ProviderRole {
     this.nodeFailureThreshold = nodeFailureThreshold;
     this.placementTimeoutSeconds = placementTimeoutSeconds;
     this.labelExpression = labelExpression;
+    this.component = component;
+
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
index 4ca9326..c80de7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
@@ -18,18 +18,15 @@
 
 package org.apache.slider.providers;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.core.main.ExitCodeProvider;
@@ -45,128 +42,17 @@ import java.net.URL;
 import java.util.List;
 import java.util.Map;
 
-public interface ProviderService extends ProviderCore,
-    Service,
-    RMOperationHandlerActions,
-    ExitCodeProvider {
+public interface ProviderService extends Service {
 
   /**
    * Set up the entire container launch context
-   * @param containerLauncher
-   * @param instanceDefinition
-   * @param container
-   * @param providerRole
-   * @param sliderFileSystem
-   * @param generatedConfPath
-   * @param appComponent
-   * @param containerTmpDirPath
    */
   void buildContainerLaunchContext(ContainerLauncher containerLauncher,
-      AggregateConf instanceDefinition,
-      Container container,
-      ProviderRole providerRole,
-      SliderFileSystem sliderFileSystem,
-      Path generatedConfPath,
-      MapOperations resourceComponent,
-      MapOperations appComponent,
-      Path containerTmpDirPath) throws
-      IOException,
-      SliderException;
+      Application application, Container container, ProviderRole providerRole,
+      SliderFileSystem sliderFileSystem) throws IOException, SliderException;
 
-  /**
-   * Notify the providers of container completion
-   * @param containerId container that has completed
-   */
-  void notifyContainerCompleted(ContainerId containerId);
-
-  /**
-   * Execute a process in the AM
-   * @param instanceDefinition cluster description
-   * @param confDir configuration directory
-   * @param env environment
-   * @param execInProgress the callback for the exec events
-   * @return true if a process was actually started
-   * @throws IOException
-   * @throws SliderException
-   */
-  boolean exec(AggregateConf instanceDefinition,
-               File confDir,
-               Map<String, String> env,
-               ProviderCompleted execInProgress) throws IOException,
-      SliderException;
-
-  /**
-   * Scan through the roles and see if it is supported.
-   * @param role role to look for
-   * @return true if the role is known about -and therefore
-   * that a launcher thread can be deployed to launch it
-   */
-  boolean isSupportedRole(String role);
-
-  /**
-   * Load a specific XML configuration file for the provider config
-   * @param confDir configuration directory
-   * @return a configuration to be included in status
-   * @throws BadCommandArgumentsException
-   * @throws IOException
-   */
-  Configuration loadProviderConfigurationInformation(File confDir)
-    throws BadCommandArgumentsException, IOException;
 
-  /**
-   * The application configuration should be initialized here
-   * 
-   * @param instanceDefinition
-   * @param fileSystem
-   * @param roleGroup
-   * @throws IOException
-   * @throws SliderException
-   */
-  void initializeApplicationConfiguration(AggregateConf instanceDefinition,
-      SliderFileSystem fileSystem, String roleGroup) throws IOException,
-      SliderException;
-
-  /**
-   * This is a validation of the application configuration on the AM.
-   * Here is where things like the existence of keytabs and other
-   * not-seen-client-side properties can be tested, before
-   * the actual process is spawned. 
-   * @param instanceDefinition clusterSpecification
-   * @param confDir configuration directory
-   * @param secure flag to indicate that secure mode checks must exist
-   * @throws IOException IO problemsn
-   * @throws SliderException any failure
-   */
-  void validateApplicationConfiguration(AggregateConf instanceDefinition,
-                                        File confDir,
-                                        boolean secure
-                                       ) throws IOException, SliderException;
-
-  /*
-     * Build the provider status, can be empty
-     * @return the provider status - map of entries to add to the info section
-     */
-  Map<String, String> buildProviderStatus();
-  
-  /**
-   * Build a map of data intended for the AM webapp that is specific
-   * about this provider. The key is some text to be displayed, and the
-   * value can be a URL that will create an anchor over the key text.
-   * 
-   * If no anchor is needed/desired, insert the key with a null value.
-   * @return the details
-   */
-  Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterSpec);
-
-  /**
-   * Get a human friendly name for web UIs and messages
-   * @return a name string. Default is simply the service instance name.
-   */
-  String getHumanName();
-
-  public void bind(StateAccessForProviders stateAccessor,
-      QueueAccess queueAccess,
-      List<Container> liveContainers);
+  void setAMState(StateAccessForProviders stateAccessForProviders);
 
   /**
    * Bind to the YARN registry
@@ -175,39 +61,6 @@ public interface ProviderService extends ProviderCore,
   void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry);
 
   /**
-   * Build up the endpoint details for this service
-   * @param details
-   */
-  void buildEndpointDetails(Map<String, MonitorDetail> details);
-
-  /**
-   * Prior to going live -register the initial service registry data
-   * @param amWebURI URL to the AM. This may be proxied, so use relative paths
-   * @param serviceRecord service record to build up
-   */
-  void applyInitialRegistryDefinitions(URL amWebURI,
-      ServiceRecord serviceRecord)
-      throws IOException;
-
-  /**
-   * Create the container release selector for this provider...any policy
-   * can be implemented
-   * @return the selector to use for choosing containers.
-   */
-  ContainerReleaseSelector createContainerReleaseSelector();
-
-  /**
-   * On AM restart (for whatever reason) this API is required to rebuild the AM
-   * internal state with the containers which were already assigned and running
-   * 
-   * @param liveContainers
-   * @param applicationId
-   * @param providerRoles
-   */
-  void rebuildContainerDetails(List<Container> liveContainers,
-      String applicationId, Map<Integer, ProviderRole> providerRoles);
-
-  /**
    * Process container status
    * @return true if status needs to be requested again, false otherwise
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index f33db9b..f8ec976 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -18,7 +18,6 @@
 
 package org.apache.slider.providers;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -37,24 +36,21 @@ import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.RoleKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.api.resource.Configuration;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
-import org.apache.slider.core.registry.docstore.ConfigUtils;
-import org.apache.slider.core.registry.docstore.ExportEntry;
 import org.apache.slider.core.registry.docstore.PublishedConfiguration;
 import org.apache.slider.core.registry.docstore.PublishedConfigurationOutputter;
-import org.apache.slider.core.registry.docstore.PublishedExports;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
@@ -66,16 +62,10 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Date;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
 import java.util.regex.Pattern;
 
 /**
@@ -114,7 +104,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
    */
   public static boolean addProviderJar(
       Map<String, LocalResource> providerResources,
-      Object provider,
+      Class providerClass,
       String jarName,
       SliderFileSystem sliderFileSystem,
       Path tempPath,
@@ -125,7 +115,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     try {
       SliderUtils.putJar(providerResources,
           sliderFileSystem,
-          provider.getClass(),
+          providerClass,
           tempPath,
           libdir,
           jarName);
@@ -138,32 +128,6 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
       }
     }
   }
-
-  /**
-   * Add/overwrite the agent tarball (overwritten every time application is
-   * restarted).
-   * @param provider an instance of a provider class
-   * @param tarName name of the tarball to upload
-   * @param sliderFileSystem the file system
-   * @param agentDir directory to upload to
-   * @return true the location could be determined and the file added
-   * @throws IOException if the upload fails
-   */
-  public static boolean addAgentTar(Object provider,
-                                    String tarName,
-                                    SliderFileSystem sliderFileSystem,
-                                    Path agentDir) throws
-  IOException {
-    File localFile = SliderUtils.findContainingJar(provider.getClass());
-    if(localFile != null) {
-      String parentDir = localFile.getParent();
-      Path agentTarPath = new Path(parentDir, tarName);
-      sliderFileSystem.getFileSystem().copyFromLocalFile(false, true,
-          agentTarPath, agentDir);
-      return true;
-    }
-    return false;
-  }
   
   /**
    * Loads all dependency jars from the default path.
@@ -193,132 +157,24 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
         libDir, libLocalSrcDir);
   }
 
-
-  /**
-   * Validate the requested number of instances of a component.
-   * <p>
-   * If max &lt;= 0:  min &lt;= count
-   * If max &gt; 0:  min &lt;= count &lt;= max
-   * @param instanceDescription configuration
-   * @param name node class name
-   * @param min requested heap size
-   * @param max maximum value.
-   * @throws BadCommandArgumentsException if the values are out of range
-   */
-  public void validateNodeCount(AggregateConf instanceDescription,
-                                String name, int min, int max)
-      throws BadCommandArgumentsException {
-    MapOperations component =
-        instanceDescription.getResourceOperations().getComponent(name);
-    int count;
-    if (component == null) {
-      count = 0;
-    } else {
-      count = component.getOptionInt(ResourceKeys.COMPONENT_INSTANCES, 0);
-    }
-    validateNodeCount(name, count, min, max);
-  }
-  
-  /**
-   * Validate the count is between min and max.
-   * <p>
-   * If max &lt;= 0:  min &lt;= count
-   * If max &gt; 0:  min &lt;= count &lt;= max
-   * @param name node class name
-   * @param count requested node count
-   * @param min requested heap size
-   * @param max maximum value. 
-   * @throws BadCommandArgumentsException if the values are out of range
-   */
-  public void validateNodeCount(String name,
-                                int count,
-                                int min,
-                                int max) throws BadCommandArgumentsException {
-    if (count < min) {
-      throw new BadCommandArgumentsException(
-        "requested no of %s nodes: %d is below the minimum of %d", name, count,
-        min);
-    }
-    if (max > 0 && count > max) {
-      throw new BadCommandArgumentsException(
-        "requested no of %s nodes: %d is above the maximum of %d", name, count,
-        max);
-    }
-  }
-
-  /**
-   * Copy options beginning with "site.configName." prefix from options map
-   * to sitexml map, removing the prefix and substituting the tokens
-   * specified in the tokenMap.
-   * @param options source map
-   * @param sitexml destination map
-   * @param configName optional ".configName" portion of the prefix
-   * @param tokenMap key/value pairs to substitute into the option values
-   */
-  public void propagateSiteOptions(Map<String, String> options,
-      Map<String, String> sitexml,
-      String configName,
-      Map<String,String> tokenMap) {
-    String prefix = OptionKeys.SITE_XML_PREFIX +
-        (!configName.isEmpty() ? configName + "." : "");
-    propagateOptions(options, sitexml, tokenMap, prefix);
-  }
-
-  /**
-   * Copy options beginning with prefix from options map
-   * to sitexml map, removing the prefix and substituting the tokens
-   * specified in the tokenMap.
-   * @param options source map
-   * @param sitexml destination map
-   * @param tokenMap key/value pairs to substitute into the option values
-   * @param prefix which options to copy to destination map
-   */
-  public void propagateOptions(Map<String, String> options,
-                                   Map<String, String> sitexml,
-                                   Map<String,String> tokenMap,
-                                   String prefix) {
-    for (Map.Entry<String, String> entry : options.entrySet()) {
-      String key = entry.getKey();
-      if (key.startsWith(prefix)) {
-        String envName = key.substring(prefix.length());
-        if (!envName.isEmpty()) {
-          String value = entry.getValue();
-          if (tokenMap != null) {
-            for (Map.Entry<String,String> token : tokenMap.entrySet()) {
-              value = value.replaceAll(Pattern.quote(token.getKey()),
-                                       token.getValue());
-            }
-          }
-          sitexml.put(envName, value);
-        }
-      }
-    }
-  }
-
-  /**
-   * Substitute tokens into option map values, returning a new map.
-   * @param options source map
-   * @param tokenMap key/value pairs to substitute into the option values
-   * @return map with substituted values
-   */
-  public Map<String, String> filterSiteOptions(Map<String, String> options,
+  // Build key -> value map
+  // value will be substituted by corresponding data in tokenMap
+  public Map<String, String> substituteConfigs(Map<String, String> configs,
       Map<String, String> tokenMap) {
-    String prefix = OptionKeys.SITE_XML_PREFIX;
     String format = "${%s}";
     Map<String, String> filteredOptions = new HashMap<>();
-    for (Map.Entry<String, String> entry : options.entrySet()) {
+    for (Map.Entry<String, String> entry : configs.entrySet()) {
       String key = entry.getKey();
-      if (key.startsWith(prefix)) {
-        String value = entry.getValue();
-        if (tokenMap != null) {
-          for (Map.Entry<String,String> token : tokenMap.entrySet()) {
-            value = value.replaceAll(Pattern.quote(token.getKey()),
-                token.getValue());
-          }
+      String value = entry.getValue();
+      if (tokenMap != null) {
+        for (Map.Entry<String, String> token : tokenMap.entrySet()) {
+          value =
+              value.replaceAll(Pattern.quote(token.getKey()), token.getValue());
         }
-        filteredOptions.put(String.format(format, key), value);
       }
+      filteredOptions.put(String.format(format, key), value);
     }
+
     return filteredOptions;
   }
 
@@ -345,28 +201,27 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     return intVal;
   }
 
+
   /**
    * Localize the service keytabs for the application.
    * @param launcher container launcher
-   * @param instanceDefinition app specification
    * @param fileSystem file system
-   * @param clusterName app name
    * @throws IOException trouble uploading to HDFS
    */
   public void localizeServiceKeytabs(ContainerLauncher launcher,
-      AggregateConf instanceDefinition, SliderFileSystem fileSystem,
-      String clusterName) throws IOException {
-    ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
-    String keytabPathOnHost = appConf.getComponent(COMPONENT_AM).get(
-            SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+      SliderFileSystem fileSystem, Application application) throws IOException {
+
+    Configuration conf = application.getConfiguration();
+    String keytabPathOnHost =
+        conf.getProperty(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
     if (SliderUtils.isUnset(keytabPathOnHost)) {
-      String amKeytabName = appConf.getComponent(COMPONENT_AM).get(
-              SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-      String keytabDir = appConf.getComponent(COMPONENT_AM).get(
-              SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
+      String amKeytabName =
+          conf.getProperty(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      String keytabDir =
+          conf.getProperty(SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
       // we need to localize the keytab files in the directory
       Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null,
-          clusterName);
+          application.getName());
       boolean serviceKeytabsDeployed = false;
       if (fileSystem.getFileSystem().exists(keytabDirPath)) {
         FileStatus[] keytabs = fileSystem.getFileSystem().listStatus(
@@ -395,591 +250,119 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   }
 
 
-  /**
-   * Upload a local file to the cluster security dir in HDFS. If the file
-   * already exists, it is not replaced.
-   * @param resource file to upload
-   * @param fileSystem file system
-   * @param clusterName app name
-   * @return Path of the uploaded file
-   * @throws IOException file cannot be uploaded
-   */
-  private Path uploadSecurityResource(File resource,
-      SliderFileSystem fileSystem, String clusterName) throws IOException {
-    Path certsDir = fileSystem.buildClusterSecurityDirPath(clusterName);
-    return uploadResource(resource, fileSystem, certsDir);
-  }
-
-  /**
-   * Upload a local file to the cluster resources dir in HDFS. If the file
-   * already exists, it is not replaced.
-   * @param resource file to upload
-   * @param fileSystem file system
-   * @param roleName optional subdirectory (for component-specific resources)
-   * @param clusterName app name
-   * @return Path of the uploaded file
-   * @throws IOException file cannot be uploaded
-   */
-  private Path uploadResource(File resource, SliderFileSystem fileSystem,
-      String roleName, String clusterName) throws IOException {
-    Path dir;
-    if (roleName == null) {
-      dir = fileSystem.buildClusterResourcePath(clusterName);
-    } else {
-      dir = fileSystem.buildClusterResourcePath(clusterName, roleName);
-    }
-    return uploadResource(resource, fileSystem, dir);
-  }
-
-  /**
-   * Upload a local file to a specified HDFS directory. If the file already
-   * exists, it is not replaced.
-   * @param resource file to upload
-   * @param fileSystem file system
-   * @param parentDir destination directory in HDFS
-   * @return Path of the uploaded file
-   * @throws IOException file cannot be uploaded
-   */
-  private synchronized Path uploadResource(File resource,
-      SliderFileSystem fileSystem, Path parentDir) throws IOException {
-    if (!fileSystem.getFileSystem().exists(parentDir)) {
-      fileSystem.getFileSystem().mkdirs(parentDir,
+  // 1. Create all config files for a component on hdfs for localization
+  // 2. Add the config file to localResource
+  //TODO handle Template format config file
+  public void createConfigFileAndAddLocalResource(ContainerLauncher launcher,
+      SliderFileSystem fs, Component component,
+      Map<String, String> tokensForSubstitution,
+      StateAccessForProviders amState) throws IOException {
+    Path compDir =
+        new Path(new Path(fs.getAppDir(), "components"), component.getName());
+    if (!fs.getFileSystem().exists(compDir)) {
+      fs.getFileSystem().mkdirs(compDir,
           new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
-    }
-    Path destPath = new Path(parentDir, resource.getName());
-    if (!fileSystem.getFileSystem().exists(destPath)) {
-      FSDataOutputStream os = null;
-      try {
-        os = fileSystem.getFileSystem().create(destPath);
-        byte[] contents = FileUtils.readFileToByteArray(resource);
-        os.write(contents, 0, contents.length);
-        os.flush();
-      } finally {
-        IOUtils.closeStream(os);
-      }
-      log.info("Uploaded {} to localization path {}", resource, destPath);
+      log.info("Creating component dir: " + compDir);
     } else {
-      log.info("Resource {} already existed at localization path {}", resource,
-          destPath);
-    }
-
-    while (!fileSystem.getFileSystem().exists(destPath)) {
-      try {
-        Thread.sleep(500);
-      } catch (InterruptedException e) {
-        // ignore
-      }
-    }
-
-    fileSystem.getFileSystem().setPermission(destPath,
-        new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
-
-    return destPath;
-  }
-
-  /**
-   * Write a configuration property map to a local file in a specified format.
-   * @param fileSystem file system
-   * @param file destination file
-   * @param configFormat file format
-   * @param configFileDN file description
-   * @param config properties to save to the file
-   * @param clusterName app name
-   * @throws IOException file cannot be created
-   */
-  private synchronized void createConfigFile(SliderFileSystem fileSystem,
-      File file, ConfigFormat configFormat, String configFileDN,
-      Map<String, String> config, String clusterName) throws IOException {
-    if (file.exists()) {
-      log.info("Skipping writing {} file {} because it already exists",
-          configFormat, file);
-      return;
-    }
-    log.info("Writing {} file {}", configFormat, file);
-
-    ConfigUtils.prepConfigForTemplateOutputter(configFormat, config,
-        fileSystem, clusterName, file.getName());
-    PublishedConfiguration publishedConfiguration =
-        new PublishedConfiguration(configFileDN,
-            config.entrySet());
-    PublishedConfigurationOutputter configurationOutputter =
-        PublishedConfigurationOutputter.createOutputter(configFormat,
-            publishedConfiguration);
-    configurationOutputter.save(file);
-  }
-
-  /**
-   * Determine config files requested in the appConf, generate the files, and
-   * localize them.
-   * @param launcher container launcher
-   * @param roleName component name
-   * @param roleGroup component group
-   * @param appConf app configurations
-   * @param configs configurations grouped by config name
-   * @param env environment variables
-   * @param fileSystem file system
-   * @param clusterName app name
-   * @throws IOException file(s) cannot be uploaded
-   * @throws BadConfigException file name not specified or file format not
-   * supported
-   */
-  public void localizeConfigFiles(ContainerLauncher launcher,
-      String roleName, String roleGroup,
-      ConfTreeOperations appConf,
-      Map<String, Map<String, String>> configs,
-      MapOperations env,
-      SliderFileSystem fileSystem,
-      String clusterName)
-      throws IOException, BadConfigException {
-    for (Entry<String, Map<String, String>> configEntry : configs.entrySet()) {
-      String configFileName = appConf.getComponentOpt(roleGroup,
-          OptionKeys.CONF_FILE_PREFIX + configEntry.getKey() + OptionKeys
-              .NAME_SUFFIX, null);
-      String configFileType = appConf.getComponentOpt(roleGroup,
-          OptionKeys.CONF_FILE_PREFIX + configEntry.getKey() + OptionKeys
-              .TYPE_SUFFIX, null);
-      if (configFileName == null && configFileType == null) {
-        // config file not requested, so continue
-        continue;
-      }
-      if (configFileName == null) {
-        throw new BadConfigException("Config file name null for " +
-            configEntry.getKey());
-      }
-      if (configFileType == null) {
-        throw new BadConfigException("Config file type null for " +
-            configEntry.getKey());
-      }
-      ConfigFormat configFormat = ConfigFormat.resolve(configFileType);
-      if (configFormat == null) {
-        throw new BadConfigException("Config format " + configFileType +
-            " doesn't exist");
-      }
-      boolean perComponent = appConf.getComponentOptBool(roleGroup,
-          OptionKeys.CONF_FILE_PREFIX + configEntry.getKey() + OptionKeys
-              .PER_COMPONENT, false);
-      boolean perGroup = appConf.getComponentOptBool(roleGroup,
-          OptionKeys.CONF_FILE_PREFIX + configEntry.getKey() + OptionKeys
-              .PER_GROUP, false);
-
-      localizeConfigFile(launcher, roleName, roleGroup, configEntry.getKey(),
-          configFormat, configFileName, configs, env, fileSystem,
-          clusterName, perComponent, perGroup);
-    }
-  }
-
-  /**
-   * Create and localize a config file.
-   * @param launcher container launcher
-   * @param roleName component name
-   * @param roleGroup component group
-   * @param configFileDN config description/name
-   * @param configFormat config format
-   * @param configFileName config file name
-   * @param configs configs grouped by config description/name
-   * @param env environment variables
-   * @param fileSystem file system
-   * @param clusterName app name
-   * @param perComponent true if file should be created per unique component
-   * @param perGroup true if file should be created per component group
-   * @throws IOException file cannot be uploaded
-   */
-  public void localizeConfigFile(ContainerLauncher launcher,
-      String roleName, String roleGroup,
-      String configFileDN, ConfigFormat configFormat, String configFileName,
-      Map<String, Map<String, String>> configs,
-      MapOperations env,
-      SliderFileSystem fileSystem,
-      String clusterName,
-      boolean perComponent,
-      boolean perGroup)
-      throws IOException {
-    if (launcher == null) {
+      log.info("Component conf dir already exists: " + compDir);
       return;
     }
-    Map<String, String> config = ConfigUtils.replacePropsInConfig(
-        configs.get(configFileDN), env.options);
-    String fileName = ConfigUtils.replaceProps(config, configFileName);
-    File localFile = new File(RESOURCE_DIR);
-    if (!localFile.exists()) {
-      if (!localFile.mkdir() && !localFile.exists()) {
-        throw new IOException(RESOURCE_DIR + " could not be created!");
-      }
-    }
-
-    String folder = null;
-    if (perComponent) {
-      folder = roleName;
-    } else if (perGroup) {
-      folder = roleGroup;
-    }
-    if (folder != null) {
-      localFile = new File(localFile, folder);
-      if (!localFile.exists()) {
-        if (!localFile.mkdir() && !localFile.exists()) {
-          throw new IOException(localFile + " could not be created!");
-        }
-      }
-    }
-    localFile = new File(localFile, new File(fileName).getName());
-
-    log.info("Localizing {} configs to config file {} (destination {}) " +
-            "based on {} configs", config.size(), localFile, fileName,
-        configFileDN);
-    if (!localFile.exists()) {
-      createConfigFile(fileSystem, localFile, configFormat, configFileDN,
-          config, clusterName);
-    } else {
-      log.info("Local {} file {} already exists", configFormat, localFile);
-    }
-    Path destPath = uploadResource(localFile, fileSystem, folder, clusterName);
-    LocalResource configResource = fileSystem.createAmResource(destPath,
-        LocalResourceType.FILE);
-
-    File destFile = new File(fileName);
-    if (destFile.isAbsolute()) {
-      launcher.addLocalResource(
-          RESOURCE_DIR + "/" + destFile.getName(),
-          configResource, fileName);
-    } else {
-      launcher.addLocalResource(APP_CONF_DIR + "/" + fileName,
-          configResource);
-    }
-  }
 
-  /**
-   * Localize application tarballs and other resources requested by the app.
-   * @param launcher container launcher
-   * @param fileSystem file system
-   * @param appConf app configurations
-   * @param roleGroup component group
-   * @param clusterName app name
-   * @throws IOException resources cannot be uploaded
-   * @throws BadConfigException package name or type is not specified
-   */
-  public void localizePackages(ContainerLauncher launcher,
-      SliderFileSystem fileSystem, ConfTreeOperations appConf, String roleGroup,
-      String clusterName) throws IOException, BadConfigException {
-    for (Entry<String, Map<String, String>> pkg :
-        getPackages(roleGroup, appConf).entrySet()) {
-      String pkgName = pkg.getValue().get(OptionKeys.NAME_SUFFIX);
-      String pkgType = pkg.getValue().get(OptionKeys.TYPE_SUFFIX);
-      Path pkgPath = fileSystem.buildResourcePath(pkgName);
-      if (!fileSystem.isFile(pkgPath)) {
-        pkgPath = fileSystem.buildResourcePath(clusterName,
-            pkgName);
-      }
-      if (!fileSystem.isFile(pkgPath)) {
-        throw new IOException("Package doesn't exist as a resource: " +
-            pkgName);
+    for (ConfigFile configFile : component.getConfiguration().getFiles()) {
+      String fileName = configFile.getSrcFile();
+      // substitute file name
+      for (Map.Entry<String, String> token : tokensForSubstitution.entrySet()) {
+        configFile.setDestFile(configFile.getDestFile()
+            .replaceAll(Pattern.quote(token.getKey()), token.getValue()));
       }
-      log.info("Adding resource {}", pkgName);
-      LocalResourceType type = LocalResourceType.FILE;
-      if ("archive".equals(pkgType)) {
-        type = LocalResourceType.ARCHIVE;
-      }
-      LocalResource packageResource = fileSystem.createAmResource(
-          pkgPath, type);
-      launcher.addLocalResource(APP_PACKAGES_DIR, packageResource);
-    }
-  }
-
-  /**
-   * Build a map of configuration description/name to configuration key/value
-   * properties, with all known tokens substituted into the property values.
-   * @param appConf app configurations
-   * @param internalsConf internal configurations
-   * @param containerId container ID
-   * @param roleName component name
-   * @param roleGroup component group
-   * @param amState access to AM state
-   * @return configuration properties grouped by config description/name
-   */
-  public Map<String, Map<String, String>> buildConfigurations(
-      ConfTreeOperations appConf, ConfTreeOperations internalsConf,
-      String containerId, String clusterName, String roleName, String roleGroup,
-      StateAccessForProviders amState) {
-
-    Map<String, Map<String, String>> configurations = new TreeMap<>();
-    Map<String, String> tokens = getStandardTokenMap(appConf,
-        internalsConf, roleName, roleGroup, containerId, clusterName);
-
-    Set<String> configs = new HashSet<>();
-    configs.addAll(getApplicationConfigurationTypes(roleGroup, appConf));
-    configs.addAll(getSystemConfigurationsRequested(appConf));
-
-    for (String configType : configs) {
-      addNamedConfiguration(configType, appConf.getGlobalOptions().options,
-          configurations, tokens, amState);
-      if (appConf.getComponent(roleGroup) != null) {
-        addNamedConfiguration(configType,
-            appConf.getComponent(roleGroup).options, configurations, tokens,
-            amState);
-      }
-    }
-
-    //do a final replacement of re-used configs
-    dereferenceAllConfigs(configurations);
-
-    return configurations;
-  }
-
-  /**
-   * Substitute "site." prefixed configuration values into other configuration
-   * values where needed. The format for these substitutions is that
-   * {@literal ${@//site/configDN/key}} will be replaced by the value for the
-   * "site.configDN.key" property.
-   * @param configurations configuration properties grouped by config
-   *                       description/name
-   */
-  public void dereferenceAllConfigs(
-      Map<String, Map<String, String>> configurations) {
-    Map<String, String> allConfigs = new HashMap<>();
-    String lookupFormat = "${@//site/%s/%s}";
-    for (Map.Entry<String, Map<String, String>> entry : configurations.entrySet()) {
-      Map<String, String> configBucket = entry.getValue();
-      for(Map.Entry<String, String> config: configBucket.entrySet()) {
-        allConfigs.put(String.format(lookupFormat, entry.getKey(), config.getKey()),
-            config.getValue());
-      }
-    }
-
-    boolean finished = false;
-    while (!finished) {
-      finished = true;
-      for (Map.Entry<String, String> entry : allConfigs.entrySet()) {
-        String configValue = entry.getValue();
-        for (Map.Entry<String, String> lookUpEntry : allConfigs.entrySet()) {
-          String lookUpValue = lookUpEntry.getValue();
-          if (lookUpValue.contains("${@//site/")) {
-            continue;
-          }
-          String lookUpKey = lookUpEntry.getKey();
-          if (configValue != null && configValue.contains(lookUpKey)) {
-            configValue = configValue.replace(lookUpKey, lookUpValue);
-          }
-        }
-        if (configValue != null && !configValue.equals(entry.getValue())) {
-          finished = false;
-          allConfigs.put(entry.getKey(), configValue);
-        }
-      }
-    }
-    for (Map.Entry<String, Map<String, String>> configEntry : configurations
-        .entrySet()) {
-      Map<String, String> configBucket = configEntry.getValue();
-      for (Map.Entry<String, String> entry: configBucket.entrySet()) {
-        String configName = entry.getKey();
-        String configValue = entry.getValue();
-        for (Map.Entry<String, String> lookUpEntry : allConfigs.entrySet()) {
-          String lookUpValue = lookUpEntry.getValue();
-          if (lookUpValue.contains("${@//site/")) {
-            continue;
-          }
-          String lookUpKey = lookUpEntry.getKey();
-          if (configValue != null && configValue.contains(lookUpKey)) {
-            configValue = configValue.replace(lookUpKey, lookUpValue);
+      // substitute configs
+      substituteConfigs(configFile.getProps(), tokensForSubstitution);
+
+      // write configs onto hdfs
+      PublishedConfiguration publishedConfiguration =
+          new PublishedConfiguration(fileName,
+              configFile.getProps().entrySet());
+      Path remoteFile = new Path(compDir, fileName);
+      if (!fs.getFileSystem().exists(remoteFile)) {
+        synchronized (this) {
+          if (!fs.getFileSystem().exists(remoteFile)) {
+            PublishedConfigurationOutputter configurationOutputter =
+                PublishedConfigurationOutputter.createOutputter(
+                    ConfigFormat.resolve(configFile.getType().toString()),
+                    publishedConfiguration);
+            FSDataOutputStream os = null;
+            try {
+              os = fs.getFileSystem().create(remoteFile);
+              configurationOutputter.save(os);
+              os.flush();
+              log.info("Created config file on hdfs: " + remoteFile);
+            } finally {
+              IOUtils.closeStream(os);
+            }
           }
         }
-        configBucket.put(configName, configValue);
-      }
-    }
-  }
-
-  /**
-   * Return a set of configuration description/names represented in the app.
-   * configuration
-   * @param roleGroup component group
-   * @param appConf app configurations
-   * @return set of configuration description/names
-   */
-  public Set<String> getApplicationConfigurationTypes(String roleGroup,
-      ConfTreeOperations appConf) {
-    Set<String> configList = new HashSet<>();
-
-    String prefix = OptionKeys.CONF_FILE_PREFIX;
-    String suffix = OptionKeys.TYPE_SUFFIX;
-    MapOperations component = appConf.getComponent(roleGroup);
-    if (component != null) {
-      addConfsToList(component, configList, prefix, suffix);
-    }
-    addConfsToList(appConf.getGlobalOptions(), configList, prefix, suffix);
-
-    return configList;
-  }
-
-  /**
-   * Finds all configuration description/names of the form
-   * prefixconfigDNsuffix in the configuration (e.g. conf.configDN.type).
-   * @param confMap configuration properties
-   * @param confList set containing configuration description/names
-   * @param prefix configuration key prefix to match
-   * @param suffix configuration key suffix to match
-   */
-  private void addConfsToList(Map<String, String> confMap,
-      Set<String> confList, String prefix, String suffix) {
-    for (Entry<String, String> entry : confMap.entrySet()) {
-      String key = entry.getKey();
-      if (key.startsWith(prefix) && key.endsWith(suffix)) {
-        String confName = key.substring(prefix.length(),
-            key.length() - suffix.length());
-        if (!confName.isEmpty()) {
-          confList.add(confName);
-        }
-      }
-    }
-  }
-
-  /**
-   * Build a map of package description/name to package key/value properties
-   * (there should be two properties, type and name).
-   * @param roleGroup component group
-   * @param appConf app configurations
-   * @return map of package description/name to package key/value properties
-   * @throws BadConfigException package name or type is not specified
-   */
-  public Map<String, Map<String, String>> getPackages(String roleGroup,
-      ConfTreeOperations appConf) throws BadConfigException {
-    Map<String, Map<String, String>> packages = new HashMap<>();
-    String prefix = OptionKeys.PKG_FILE_PREFIX;
-    String typeSuffix = OptionKeys.TYPE_SUFFIX;
-    String nameSuffix = OptionKeys.NAME_SUFFIX;
-    MapOperations component = appConf.getComponent(roleGroup);
-    if (component == null) {
-      component = appConf.getGlobalOptions();
-    }
-    for (Map.Entry<String, String> entry : component.entrySet()) {
-      String key = entry.getKey();
-      if (key.startsWith(prefix)) {
-        String confName;
-        String type;
-        if (key.endsWith(typeSuffix)) {
-          confName = key.substring(prefix.length(), key.length() - typeSuffix.length());
-          type = typeSuffix;
-        } else if (key.endsWith(nameSuffix)) {
-          confName = key.substring(prefix.length(), key.length() - nameSuffix.length());
-          type = nameSuffix;
-        } else {
-          continue;
-        }
-        if (!packages.containsKey(confName)) {
-          packages.put(confName, new HashMap<String, String>());
-        }
-        packages.get(confName).put(type, entry.getValue());
-      }
-    }
-
-    for (Entry<String, Map<String, String>> pkg : packages.entrySet()) {
-      if (!pkg.getValue().containsKey(OptionKeys.TYPE_SUFFIX)) {
-        throw new BadConfigException("Package " + pkg.getKey() + " doesn't " +
-            "have a package type");
       }
-      if (!pkg.getValue().containsKey(OptionKeys.NAME_SUFFIX)) {
-        throw new BadConfigException("Package " + pkg.getKey() + " doesn't " +
-            "have a package name");
-      }
-    }
-
-    return packages;
-  }
-
-  /**
-   * Return system configurations requested by the app.
-   * @param appConf app configurations
-   * @return set of system configurations
-   */
-  public Set<String> getSystemConfigurationsRequested(
-      ConfTreeOperations appConf) {
-    Set<String> configList = new HashSet<>();
 
-    String configTypes = appConf.get(SYSTEM_CONFIGS);
-    if (configTypes != null && configTypes.length() > 0) {
-      String[] configs = configTypes.split(",");
-      for (String config : configs) {
-        configList.add(config.trim());
+      // Publish configs
+      amState.getPublishedSliderConfigurations()
+          .put(configFile.getSrcFile(), publishedConfiguration);
+
+      // Add resource for localization
+      LocalResource configResource =
+          fs.createAmResource(remoteFile, LocalResourceType.FILE);
+      File destFile = new File(configFile.getDestFile());
+      //TODO why do we need to differentiate RESOURCE_DIR vs APP_CONF_DIR
+      if (destFile.isAbsolute()) {
+        String symlink = RESOURCE_DIR + "/" + fileName;
+        launcher.addLocalResource(symlink, configResource,
+            configFile.getDestFile());
+        log.info("Add config file for localization: " + symlink + " -> "
+            + configResource.getResource().getFile() + ", dest mount path: "
+            + configFile.getDestFile());
+      } else {
+        String symlink = APP_CONF_DIR + "/" + fileName;
+        launcher.addLocalResource(symlink, configResource);
+        log.info("Add config file for localization: " + symlink + " -> "
+            + configResource.getResource().getFile());
       }
     }
-
-    return configList;
-  }
-
-  /**
-   * For a given config description/name, pull out its site configs from the
-   * source config map, remove the site.configDN. prefix from them, and place
-   * them into a new config map using the {@link #propagateSiteOptions} method
-   * (with tokens substituted). This new k/v map is put as the value for the
-   * configDN key in the configurations map.
-   * @param configName config description/name
-   * @param sourceConfig config containing site.* properties
-   * @param configurations configuration map to be populated
-   * @param tokens initial substitution tokens
-   * @param amState access to AM state
-   */
-  private void addNamedConfiguration(String configName,
-      Map<String, String> sourceConfig,
-      Map<String, Map<String, String>> configurations,
-      Map<String, String> tokens, StateAccessForProviders amState) {
-    Map<String, String> config = new HashMap<>();
-    if (configName.equals(GLOBAL_CONFIG_TAG)) {
-      addDefaultGlobalConfig(config);
-    }
-    // add role hosts to tokens
-    addRoleRelatedTokens(tokens, amState);
-    propagateSiteOptions(sourceConfig, config, configName, tokens);
-
-    configurations.put(configName, config);
   }
 
   /**
    * Get initial token map to be substituted into config values.
    * @param appConf app configurations
-   * @param internals internal configurations
-   * @param componentName component name
-   * @param componentGroup component group
-   * @param clusterName app name
-   * @return tokens to replace
-   */
-  public Map<String, String> getStandardTokenMap(ConfTreeOperations appConf,
-      ConfTreeOperations internals, String componentName,
-      String componentGroup, String clusterName) {
-    return getStandardTokenMap(appConf, internals, componentName,
-        componentGroup, null, clusterName);
-  }
-
-  /**
-   * Get initial token map to be substituted into config values.
-   * @param appConf app configurations
-   * @param internals internal configurations
    * @param componentName component name
    * @param componentGroup component group
    * @param containerId container ID
    * @param clusterName app name
    * @return tokens to replace
    */
-  public Map<String, String> getStandardTokenMap(ConfTreeOperations appConf,
-      ConfTreeOperations internals, String componentName,
+  public Map<String, String> getStandardTokenMap(
+      Configuration appConf, Configuration componentConf, String componentName,
       String componentGroup, String containerId, String clusterName) {
 
     Map<String, String> tokens = new HashMap<>();
     if (containerId != null) {
       tokens.put("${CONTAINER_ID}", containerId);
     }
-    String nnuri = appConf.get("site.fs.defaultFS");
-    tokens.put("${NN_URI}", nnuri);
-    tokens.put("${NN_HOST}", URI.create(nnuri).getHost());
-    tokens.put("${ZK_HOST}", appConf.get(OptionKeys.ZOOKEEPER_HOSTS));
-    tokens.put("${DEFAULT_ZK_PATH}", appConf.get(OptionKeys.ZOOKEEPER_PATH));
-    String prefix = appConf.getComponentOpt(componentGroup, ROLE_PREFIX,
-        null);
+    String nnuri = appConf.getProperty("fs.defaultFS");
+    if (nnuri != null && !nnuri.isEmpty()) {
+      tokens.put("${NN_URI}", nnuri);
+      tokens.put("${NN_HOST}", URI.create(nnuri).getHost());
+    }
+    tokens.put("${ZK_HOST}", appConf.getProperty(OptionKeys.ZOOKEEPER_HOSTS));
+    tokens.put("${DEFAULT_ZK_PATH}", appConf.getProperty(OptionKeys.ZOOKEEPER_PATH));
+    String prefix = componentConf.getProperty(ROLE_PREFIX);
     String dataDirSuffix = "";
     if (prefix == null) {
       prefix = "";
     } else {
       dataDirSuffix = "_" + SliderUtils.trimPrefix(prefix);
     }
-    tokens.put("${DEFAULT_DATA_DIR}", internals.getGlobalOptions()
-        .getOption(InternalKeys.INTERNAL_DATA_DIR_PATH, null) + dataDirSuffix);
-    tokens.put("${JAVA_HOME}", appConf.get(JAVA_HOME));
+    tokens.put("${DEFAULT_DATA_DIR}",
+        appConf.getProperty(InternalKeys.INTERNAL_DATA_DIR_PATH)
+            + dataDirSuffix);
+    tokens.put("${JAVA_HOME}", appConf.getProperty(JAVA_HOME));
     tokens.put("${COMPONENT_NAME}", componentName);
     tokens.put("${COMPONENT_NAME.lc}", componentName.toLowerCase());
     tokens.put("${COMPONENT_PREFIX}", prefix);
@@ -1005,7 +388,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
    * @param tokens existing tokens
    * @param amState access to AM state
    */
-  public void addRoleRelatedTokens(Map<String, String> tokens,
+  public void addRoleHostTokens(Map<String, String> tokens,
       StateAccessForProviders amState) {
     if (amState == null) {
       return;
@@ -1020,26 +403,6 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   }
 
   /**
-   * Add global configuration properties.
-   * @param config map where default global properties will be added
-   */
-  private void addDefaultGlobalConfig(Map<String, String> config) {
-    config.put("app_log_dir", "${LOG_DIR}");
-    config.put("app_pid_dir", "${WORK_DIR}/app/run");
-    config.put("app_install_dir", "${WORK_DIR}/app/install");
-    config.put("app_conf_dir", "${WORK_DIR}/" + APP_CONF_DIR);
-    config.put("app_input_conf_dir", "${WORK_DIR}/" + PROPAGATED_CONF_DIR_NAME);
-
-    // add optional parameters only if they are not already provided
-    if (!config.containsKey("pid_file")) {
-      config.put("pid_file", "${WORK_DIR}/app/run/component.pid");
-    }
-    if (!config.containsKey("app_root")) {
-      config.put("app_root", "${WORK_DIR}/app/install");
-    }
-  }
-
-  /**
    * Return a list of hosts based on current ClusterNodes.
    * @param values cluster nodes
    * @param hostOnly whether host or host/server name will be added to list
@@ -1101,82 +464,4 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
           containerId, e);
     }
   }
-
-  /**
-   * Publish a named property bag that may contain name-value pairs for app
-   * configurations such as hbase-site.
-   * @param name config file identifying name
-   * @param description config file description
-   * @param entries config file properties
-   * @param amState access to AM state
-   */
-  public void publishApplicationInstanceData(String name, String description,
-      Iterable<Map.Entry<String, String>> entries,
-      StateAccessForProviders amState) {
-    PublishedConfiguration pubconf = new PublishedConfiguration(description,
-        entries);
-    log.info("publishing {}", pubconf);
-    amState.getPublishedSliderConfigurations().put(name, pubconf);
-  }
-
-  /**
-   * Publish an export group.
-   * @param exportGroup export groups
-   * @param amState access to AM state
-   * @param groupName export group name
-   */
-  public void publishExportGroup(
-      Map<String, Set<ExportEntry>> exportGroup,
-      StateAccessForProviders amState, String groupName) {
-    // Publish in old format for the time being
-    Map<String, String> simpleEntries = new HashMap<>();
-    for (Entry<String, Set<ExportEntry>> entry : exportGroup.entrySet()) {
-      Set<ExportEntry> exports = entry.getValue();
-      if (SliderUtils.isNotEmpty(exports)) {
-        Set<String> values = new TreeSet<>();
-        for (ExportEntry export : exports) {
-          values.add(export.getValue());
-        }
-        simpleEntries.put(entry.getKey(), StringUtils.join(",", values));
-      }
-    }
-    publishApplicationInstanceData(groupName, groupName,
-        simpleEntries.entrySet(), amState);
-
-    PublishedExports exports = new PublishedExports(groupName);
-    exports.setUpdated(new Date().getTime());
-    exports.putValues(exportGroup.entrySet());
-    amState.getPublishedExportsSet().put(groupName, exports);
-  }
-
-  public Map<String, String> getExports(ConfTreeOperations appConf,
-      String roleGroup) {
-    Map<String, String> exports = new HashMap<>();
-    propagateOptions(appConf.getComponent(roleGroup).options, exports,
-        null, OptionKeys.EXPORT_PREFIX);
-    return exports;
-  }
-
-  public String getGroupKey(String roleGroup, ConfTreeOperations appConf) {
-    String rolePrefix = appConf.getComponentOpt(roleGroup, ROLE_PREFIX, "");
-    return getNameOrGroupKey(rolePrefix, roleGroup);
-  }
-
-  public String getNameKey(String roleName, String roleGroup,
-      ConfTreeOperations appConf) {
-    String rolePrefix = appConf.getComponentOpt(roleGroup, ROLE_PREFIX, "");
-    return getNameOrGroupKey(rolePrefix, roleName);
-  }
-
-  public String getNameOrGroupKey(String rolePrefix, String roleNameOrGroup) {
-    if (!rolePrefix.isEmpty()) {
-      if (!roleNameOrGroup.startsWith(rolePrefix)) {
-        log.warn("Something went wrong, {} doesn't start with {}",
-            roleNameOrGroup, rolePrefix);
-        return null;
-      }
-      roleNameOrGroup = roleNameOrGroup.substring(rolePrefix.length());
-    }
-    return roleNameOrGroup.toUpperCase(Locale.ENGLISH);
-  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java
new file mode 100644
index 0000000..e339a0a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.core.main.LauncherExitCodes;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test Anti-affine placement with a cluster of size 1.
+ */
+public class TestMockAppStateAAOvercapacity extends BaseMockAppStateAATest
+    implements MockRoles {
+
+  private static final int NODES = 1;
+
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(NODES, 1);
+  }
+
+  void assertAllContainersAA() {
+    assertAllContainersAA(getAaRole().getKey());
+  }
+
+  /**
+   *
+   * @throws Throwable
+   */
+  @Test
+  public void testOvercapacityRecovery() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    describe("Ask for 1 more than the no of available nodes;" +
+             "verify the state. kill the allocated container and review");
+    //more than expected
+    int desired = 3;
+    aaRole.setDesired(desired);
+    assertTrue(appState.getRoleHistory().canPlaceAANodes());
+
+    //first request
+    List<AbstractRMOperation> operations =
+        appState.reviewRequestAndReleaseNodes();
+    assertTrue(aaRole.isAARequestOutstanding());
+    assertEquals(1, aaRole.getRequested());
+    assertEquals(desired - 1, aaRole.getAAPending());
+    List<AbstractRMOperation> operationsOut = new ArrayList<>();
+    // allocate and re-submit
+    List<RoleInstance> instances = submitOperations(operations,
+        EMPTY_ID_LIST, operationsOut);
+    assertEquals(1, instances.size());
+    assertAllContainersAA();
+
+    // expect an outstanding AA request to be unsatisfied
+    assertTrue(aaRole.getRunning() < aaRole.getDesired());
+    assertEquals(0, aaRole.getRequested());
+    assertFalse(aaRole.isAARequestOutstanding());
+    assertEquals(desired - 1, aaRole.getAAPending());
+    List<Container> allocatedContainers = engine.execute(operations,
+        EMPTY_ID_LIST);
+    assertEquals(0, allocatedContainers.size());
+
+    // now lets trigger a failure
+    NodeMap nodemap = cloneNodemap();
+    assertEquals(1, nodemap.size());
+
+    RoleInstance instance = instances.get(0);
+    ContainerId cid = instance.getContainerId();
+
+    AppState.NodeCompletionResult result = appState.onCompletedContainer(
+        containerStatus(cid, LauncherExitCodes.EXIT_TASK_LAUNCH_FAILURE));
+    assertTrue(result.containerFailed);
+
+    assertEquals(1, aaRole.getFailed());
+    assertEquals(0, aaRole.getRunning());
+    List<NodeInstance> availablePlacements = appState.getRoleHistory()
+        .findNodeForNewAAInstance(aaRole);
+    assertEquals(1, availablePlacements.size());
+    describe("expecting a successful review with available placements of " +
+            availablePlacements);
+    operations = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, operations.size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
new file mode 100644
index 0000000..eb25b40
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.slider.api.ResourceKeys.COMPONENT_PLACEMENT_POLICY;
+import static org.apache.slider.server.appmaster.model.mock.MockFactory.AAROLE_2;
+
+/**
+ * Test Anti-affine placement.
+ */
+public class TestMockAppStateAAPlacement extends BaseMockAppStateAATest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockAppStateAAPlacement.class);
+
+  /** Number of nodes in the mock cluster. */
+  private static final int NODES = 3;
+
+  /**
+   * The YARN engine has a cluster with very few nodes (3) and lots of
+   * containers, so if AA placement isn't working, there will be affine
+   * placements surfacing.
+   * @return a 3-node engine with 8 containers per node
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(NODES, 8);
+  }
+
+  /**
+   * This is the simplest AA allocation: no labels, so allocate anywhere.
+   * @throws Throwable
+   */
+  @Test
+  public void testAllocateAANoLabel() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    assertTrue(cloneNodemap().size() > 0);
+
+    // want multiple instances, so there will be iterations
+    aaRole.setDesired(2);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    AMRMClient.ContainerRequest request = getSingleRequest(ops);
+    assertFalse(request.getRelaxLocality());
+    assertEquals(request.getNodes().size(), engine.getCluster()
+        .getClusterSize());
+    assertNull(request.getRacks());
+    assertNotNull(request.getCapability());
+
+    Container allocated = engine.allocateContainer(request);
+
+    // notify the app of the container and expect an assignment plus
+    // the follow-up cancel/request operations
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> operations = new ArrayList<>();
+    appState.onContainersAllocated(Arrays.asList(allocated), assignments,
+        operations);
+
+    String host = allocated.getNodeId().getHost();
+    NodeInstance hostInstance = cloneNodemap().get(host);
+    assertEquals(1, hostInstance.get(aaRole.getKey()).getStarting());
+    assertFalse(hostInstance.canHost(aaRole.getKey(), ""));
+    assertFalse(hostInstance.canHost(aaRole.getKey(), null));
+
+    // assignment
+    assertEquals(1, assignments.size());
+
+    // verify the release matches the allocation.
+    // (was assertNotNull(...equals(...)), which asserted non-null on a
+    // boxed boolean and so could never fail; assert real equality instead)
+    assertEquals(2, operations.size());
+    assertEquals(allocated.getResource(),
+        getCancel(operations, 0).getCapability());
+
+    // we also expect a new allocation request to have been issued
+
+    ContainerRequest req2 = getRequest(operations, 1);
+    assertEquals(req2.getNodes().size(), engine.getCluster()
+        .getClusterSize() - 1);
+
+    assertFalse(req2.getNodes().contains(host));
+    assertFalse(request.getRelaxLocality());
+
+    // verify the pending counter is down
+    assertEquals(0L, aaRole.getAAPending());
+    Container allocated2 = engine.allocateContainer(req2);
+
+    // placement must be on a different host
+    assertNotEquals(allocated2.getNodeId(), allocated.getNodeId());
+
+    ContainerAssignment assigned = assignments.get(0);
+    Container container = assigned.container;
+    RoleInstance ri = roleInstance(assigned);
+    //tell the app it arrived
+    appState.containerStartSubmitted(container, ri);
+    assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(0, ops.size());
+    assertAllContainersAA();
+
+    // identify those hosts with an aa role on
+    Map<Integer, String> naming = appState.buildNamingMap();
+    assertEquals(3, naming.size());
+
+    String name = aaRole.getName();
+    assertEquals(name, naming.get(aaRole.getKey()));
+    Map<String, NodeInformation> info =
+        appState.getRoleHistory().getNodeInformationSnapshot(naming);
+    assertTrue(SliderUtils.isNotEmpty(info));
+
+    NodeInformation nodeInformation = info.get(host);
+    assertNotNull(nodeInformation);
+    assertTrue(SliderUtils.isNotEmpty(nodeInformation.entries));
+    assertNotNull(nodeInformation.entries.get(name));
+    assertEquals(1, nodeInformation.entries.get(name).live);
+  }
+
+  /**
+   * Flex the role up mid-sequence: no new requests should be issued,
+   * only the AA-pending counter should rise.
+   * @throws Throwable
+   */
+  @Test
+  public void testAllocateFlexUp() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    // want multiple instances, so there will be iterations
+    aaRole.setDesired(2);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    getSingleRequest(ops);
+    assertEquals(1, aaRole.getRequested());
+    assertEquals(1, aaRole.getAAPending());
+    assertEquals(aaRole.getActualAndRequested() + aaRole
+            .getAAPending(), aaRole.getDesired());
+
+    // now trigger that flex up
+    aaRole.setDesired(3);
+
+    // expect: no new requests, pending count ++
+    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+    assertTrue(ops2.isEmpty());
+    assertEquals(aaRole.getRunning() + aaRole.getAAPending() +
+            aaRole.getOutstandingAARequestCount(), aaRole.getDesired());
+
+    // 1 outstanding
+    assertEquals(0, aaRole.getRunning());
+    assertTrue(aaRole.isAARequestOutstanding());
+    // and one AA
+    assertEquals(2, aaRole.getAAPending());
+    assertAllContainersAA();
+
+    // next iter
+    assertEquals(1, submitOperations(ops, EMPTY_ID_LIST, ops2).size());
+    assertEquals(2, ops2.size());
+    assertEquals(1, aaRole.getAAPending());
+    assertAllContainersAA();
+
+    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+    // now trigger the next execution cycle
+    List<AbstractRMOperation> ops3 = new ArrayList<>();
+    assertEquals(1, submitOperations(ops2, EMPTY_ID_LIST, ops3).size());
+    assertEquals(2, ops3.size());
+    assertEquals(0, aaRole.getAAPending());
+    assertAllContainersAA();
+  }
+
+  /**
+   * Flex down while an AA request is outstanding and more are pending:
+   * the pending count is decremented rather than any cancel being issued.
+   * @throws Throwable
+   */
+  @Test
+  public void testAllocateFlexDownDecrementsPending() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    // want multiple instances, so there will be iterations
+    aaRole.setDesired(2);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    getSingleRequest(ops);
+    assertEquals(1, aaRole.getAAPending());
+    assertTrue(aaRole.isAARequestOutstanding());
+
+    // flex down so that the next request should be cancelled
+    aaRole.setDesired(1);
+
+    // expect: no new requests, pending count --
+    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+    assertTrue(ops2.isEmpty());
+    assertTrue(aaRole.isAARequestOutstanding());
+    assertEquals(0, aaRole.getAAPending());
+    assertAllContainersAA();
+
+    // next iter (return value deliberately unused; the original called
+    // .size() on it and discarded the result)
+    submitOperations(ops, EMPTY_ID_LIST, ops2);
+    assertEquals(1, ops2.size());
+    assertAllContainersAA();
+  }
+
+  /**
+   * Here flex down while there is only one outstanding request.
+   * The outstanding flex should be cancelled
+   * @throws Throwable
+   */
+  @Test
+  public void testAllocateFlexDownForcesCancel() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    // want multiple instances, so there will be iterations
+    aaRole.setDesired(1);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    getSingleRequest(ops);
+    assertEquals(1, aaRole.getRequested());
+    assertEquals(0, aaRole.getAAPending());
+    assertTrue(aaRole.isAARequestOutstanding());
+
+    // flex down so that the next request should be cancelled
+    aaRole.setDesired(0);
+    // expect: no new requests, pending count --
+    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+    assertEquals(0, aaRole.getRequested());
+    assertEquals(0, aaRole.getAAPending());
+    assertFalse(aaRole.isAARequestOutstanding());
+    assertEquals(1, ops2.size());
+    getSingleCancel(ops2);
+
+    // next iter: expect a single release operation
+    submitOperations(ops, EMPTY_ID_LIST, ops2);
+    getSingleRelease(ops2);
+  }
+
+  /** Assert that all containers of the AA role satisfy anti-affinity. */
+  void assertAllContainersAA() {
+    assertAllContainersAA(getAaRole().getKey());
+  }
+
+  /**
+   * Ask for one more AA instance than there are nodes; the final request
+   * must stay unsatisfied until the cluster gains a node.
+   * @throws Throwable
+   */
+  @Test
+  public void testAskForTooMany() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+
+    describe("Ask for 1 more than the no of available nodes;" +
+        " expect the final request to be unsatisfied until the cluster " +
+        "changes size");
+    //more than expected
+    aaRole.setDesired(NODES + 1);
+    List<AbstractRMOperation> operations = appState
+        .reviewRequestAndReleaseNodes();
+    assertTrue(aaRole.isAARequestOutstanding());
+    assertEquals(NODES, aaRole.getAAPending());
+    for (int i = 0; i < NODES; i++) {
+      String iter = "Iteration " + i + " role = " + aaRole;
+      LOG.info(iter);
+      List<AbstractRMOperation> operationsOut = new ArrayList<>();
+      assertEquals(1, submitOperations(operations, EMPTY_ID_LIST,
+          operationsOut).size());
+      operations = operationsOut;
+      // every iteration but the last issues a cancel plus a new request
+      if (i + 1 < NODES) {
+        assertEquals(2, operations.size());
+      } else {
+        assertEquals(1, operations.size());
+      }
+      assertAllContainersAA();
+    }
+    // expect an outstanding AA request to be unsatisfied
+    assertTrue(aaRole.getRunning() < aaRole.getDesired());
+    assertEquals(0, aaRole.getRequested());
+    assertFalse(aaRole.isAARequestOutstanding());
+    List<Container> allocatedContainers = engine.execute(operations,
+        EMPTY_ID_LIST);
+    assertEquals(0, allocatedContainers.size());
+    // in a review now, no more requests can be generated, as there is no
+    // space for AA placements, even though there is cluster capacity
+    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+    // now do a node update (this doesn't touch the YARN engine; the node
+    // isn't really there)
+    NodeUpdatedOutcome outcome = addNewNode();
+    assertEquals(cloneNodemap().size(), NODES + 1);
+    assertTrue(outcome.clusterChanged);
+    // no active calls to empty
+    assertTrue(outcome.operations.isEmpty());
+    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+  }
+
+  /**
+   * Feed a node report for a new (not engine-backed) node into the app
+   * state.
+   * @return the outcome of the node update
+   */
+  protected AppState.NodeUpdatedOutcome addNewNode() {
+    return updateNodes(MockFactory.INSTANCE.newNodeReport("4", NodeState
+        .RUNNING, "gpu"));
+  }
+
+  @Test
+  public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
+    RoleStatus aaRole = getAaRole();
+    describe("Change the cluster size where the cluster size changes during " +
+        "a test sequence.");
+    aaRole.setDesired(NODES + 1);
+    appState.reviewRequestAndReleaseNodes();
+    assertTrue(aaRole.isAARequestOutstanding());
+    assertEquals(NODES, aaRole.getAAPending());
+    NodeUpdatedOutcome outcome = addNewNode();
+    assertTrue(outcome.clusterChanged);
+    // one call to cancel
+    assertEquals(1, outcome.operations.size());
+    // and on a review, one more to rebuild
+    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+  }
+
+  @Test
+  public void testBindingInfoMustHaveNodeMap() throws Throwable {
+    AppStateBindingInfo bindingInfo = buildBindingInfo();
+    bindingInfo.nodeReports = null;
+    try {
+      MockAppState state = new MockAppState(bindingInfo);
+      fail("Expected an exception, got " + state);
+    } catch (IllegalArgumentException expected) {
+      // expected: binding info without node reports must be rejected
+    }
+  }
+
+  /**
+   * Rebuild the app state from live containers, as on AM restart, and
+   * verify the AA role is rebuilt with its request outstanding.
+   * @throws Throwable
+   */
+  @Test
+  public void testAMRestart() throws Throwable {
+    int desiredAA = 3;
+    getAaRole().setDesired(desiredAA);
+    List<RoleInstance> instances = createAndStartNodes();
+    List<Container> containers = new ArrayList<>();
+    for (RoleInstance instance : instances) {
+      containers.add(instance.container);
+    }
+
+    // now destroy the app state
+    AppStateBindingInfo bindingInfo = buildBindingInfo();
+    bindingInfo.application = factory.newApplication(0, 0, desiredAA).name(
+        getTestName());
+    bindingInfo.application.getComponent(ROLE2)
+        .getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
+        Integer.toString(PlacementPolicy.ANTI_AFFINITY_REQUIRED));
+    bindingInfo.liveContainers = containers;
+    appState = new MockAppState(bindingInfo);
+
+    RoleStatus aaRole = lookupRole(AAROLE_2.name);
+    // lookup throws if the GPU role was not rebuilt; the result itself
+    // is not needed (was assigned to an unused local)
+    lookupRole(MockFactory.AAROLE_1_GPU.name);
+    appState.reviewRequestAndReleaseNodes();
+    assertTrue(aaRole.isAntiAffinePlacement());
+    assertTrue(aaRole.isAARequestOutstanding());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
new file mode 100644
index 0000000..ea0dcf4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.actions.ResetFailureWindow;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAM;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+/**
+ * Test that if you have >1 role, the right roles are chosen for release.
+ */
+public class TestMockAppStateContainerFailure extends BaseMockAppStateTest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockAppStateContainerFailure.class);
+
+  private MockRMOperationHandler operationHandler = new
+      MockRMOperationHandler();
+  private MockAM mockAM = new MockAM();
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateContainerFailure";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a 4-node engine with 8000 containers per node
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(4, 8000);
+  }
+
+  /**
+   * Build the application with the container failure threshold set to 10,
+   * so the recurrent-failure tests can trip it.
+   */
+  @Override
+  public Application buildApplication() {
+    Application application = super.buildApplication();
+    application.getConfiguration().setProperty(
+        ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "10");
+    return application;
+  }
+
+  /**
+   * A container that fails while still short-lived is counted as a failure
+   * and its node is dropped from the recent-node list.
+   */
+  @Test
+  public void testShortLivedFail() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+    long created = instance.createTime;
+    long started = instance.startTime;
+    assertTrue(created > 0);
+    assertTrue(started >= created);
+    List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+    ContainerId cid = ids.get(0);
+    assertTrue(appState.isShortLived(instance));
+    AppState.NodeCompletionResult result = appState.onCompletedContainer(
+        containerStatus(cid, 1));
+    assertNotNull(result.roleInstance);
+    assertTrue(result.containerFailed);
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    // start-failed counter assertion currently disabled:
+//    assertEquals(1, status.getStartFailed());
+
+    //view the world
+    appState.getRoleHistory().dump();
+    List<NodeInstance> queue = appState.getRoleHistory().cloneRecentNodeList(
+        getRole0Status().getKey());
+    assertEquals(0, queue.size());
+
+  }
+
+  /**
+   * A container that fails after running a while (start time backdated an
+   * hour) is not short-lived; its node stays in the recent-node list.
+   */
+  @Test
+  public void testLongLivedFail() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+    // backdate the start time by an hour so the container is long-lived
+    instance.startTime = System.currentTimeMillis() - 60 * 60 * 1000;
+    assertFalse(appState.isShortLived(instance));
+    List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+    ContainerId cid = ids.get(0);
+    AppState.NodeCompletionResult result = appState.onCompletedContainer(
+        containerStatus(cid, 1));
+    assertNotNull(result.roleInstance);
+    assertTrue(result.containerFailed);
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    // start-failed counter assertion currently disabled:
+//    assertEquals(0, status.getStartFailed());
+
+    //view the world
+    appState.getRoleHistory().dump();
+    List<NodeInstance> queue = appState.getRoleHistory().cloneRecentNodeList(
+        getRole0Status().getKey());
+    assertEquals(1, queue.size());
+
+  }
+
+  /**
+   * A node-manager start failure is recorded against both the role status
+   * and the node entry, and the node leaves the recent-node list.
+   */
+  @Test
+  public void testNodeStartFailure() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndSubmitNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+
+    List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+    ContainerId cid = ids.get(0);
+    appState.onNodeManagerContainerStartFailed(cid, new SliderException(
+        "oops"));
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    // start-failed counter assertion currently disabled:
+//    assertEquals(1, status.getStartFailed());
+
+
+    RoleHistory history = appState.getRoleHistory();
+    history.dump();
+    List<NodeInstance> queue = history.cloneRecentNodeList(getRole0Status()
+        .getKey());
+    assertEquals(0, queue.size());
+
+    NodeInstance ni = history.getOrCreateNodeInstance(instance.container);
+    NodeEntry re = ni.get(getRole0Status().getKey());
+    assertEquals(1, re.getFailed());
+    assertEquals(1, re.getStartFailed());
+  }
+
+  /**
+   * Repeated startup failures must eventually trigger cluster teardown
+   * (failure threshold is 10; see {@link #buildApplication()}).
+   */
+  @Test
+  public void testRecurrentStartupFailure() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    try {
+      for (int i = 0; i< 100; i++) {
+        List<RoleInstance> instances = createAndSubmitNodes();
+        assertEquals(1, instances.size());
+
+        List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+        ContainerId cid = ids.get(0);
+        LOG.info("{} instance {} {}", i, instances.get(0), cid);
+        assertNotNull(cid);
+        appState.onNodeManagerContainerStartFailed(cid,
+            new SliderException("failure #" + i));
+        AppState.NodeCompletionResult result = appState.onCompletedContainer(
+            containerStatus(cid));
+        assertTrue(result.containerFailed);
+      }
+      fail("Cluster did not fail from too many startup failures");
+    } catch (TriggerClusterTeardownException teardown) {
+      // expected: the failure threshold was exceeded
+      LOG.info("Exception {} : {}", teardown.getExitCode(), teardown);
+    }
+  }
+
+  /**
+   * With the failure threshold set to 0 (unlimited), repeated startup
+   * failures must never trigger teardown.
+   */
+  @Test
+  public void testRecurrentStartupFailureWithUnlimitedFailures() throws
+      Throwable {
+    // Update instance definition to allow containers to fail any number of
+    // times
+    AppStateBindingInfo bindingInfo = buildBindingInfo();
+    bindingInfo.application.getConfiguration().setProperty(
+        ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "0");
+    appState = new MockAppState(bindingInfo);
+
+    getRole0Status().setDesired(1);
+    try {
+      for (int i = 0; i < 100; i++) {
+        List<RoleInstance> instances = createAndSubmitNodes();
+        assertEquals(1, instances.size());
+
+        List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+        ContainerId cid = ids.get(0);
+        LOG.info("{} instance {} {}", i, instances.get(0), cid);
+        assertNotNull(cid);
+        appState.onNodeManagerContainerStartFailed(cid,
+            new SliderException("failure #" + i));
+        AppState.NodeCompletionResult result = appState.onCompletedContainer(
+            containerStatus(cid));
+        assertTrue(result.containerFailed);
+      }
+    } catch (TriggerClusterTeardownException teardown) {
+      LOG.info("Exception {} : {}", teardown.getExitCode(), teardown);
+      fail("Cluster failed despite " + ResourceKeys
+          .CONTAINER_FAILURE_THRESHOLD + " = 0");
+    }
+  }
+
+  /**
+   * Resetting the failure window before each failure keeps the recent
+   * failure count below the threshold, so no teardown occurs.
+   */
+  @Test
+  public void testRoleStatusFailureWindow() throws Throwable {
+
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+
+    // initial reset
+    resetter.execute(mockAM, null, appState);
+
+    getRole0Status().setDesired(1);
+    for (int i = 0; i < 100; i++) {
+      // reset the failure window each iteration before failing the container
+      resetter.execute(mockAM, null, appState);
+      List<RoleInstance> instances = createAndSubmitNodes();
+      assertEquals(1, instances.size());
+
+      List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+
+      ContainerId cid = ids.get(0);
+      LOG.info("{} instance {} {}", i, instances.get(0), cid);
+      assertNotNull(cid);
+      appState.onNodeManagerContainerStartFailed(
+          cid,
+          new SliderException("failure #" + i));
+      AppState.NodeCompletionResult result = appState.onCompletedContainer(
+          containerStatus(cid));
+      assertTrue(result.containerFailed);
+    }
+  }
+
+  /**
+   * A plain failure increments failed and failed-recently; a window reset
+   * clears only the recent counter.
+   */
+  @Test
+  public void testRoleStatusFailed() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // limits exceeded
+    appState.incFailedContainers(status, ContainerOutcome.Failed);
+    assertEquals(1, status.getFailed());
+    assertEquals(1L, status.getFailedRecently());
+    assertEquals(0L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(0L, status.getDiskFailed());
+
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+  }
+
+  /**
+   * A limits-exceeded failure also bumps the limits-exceeded counter,
+   * which survives a failure-window reset.
+   */
+  @Test
+  public void testRoleStatusFailedLimitsExceeded() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // limits exceeded
+    appState.incFailedContainers(status, ContainerOutcome
+        .Failed_limits_exceeded);
+    assertEquals(1, status.getFailed());
+    assertEquals(1L, status.getFailedRecently());
+    assertEquals(1L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(0L, status.getDiskFailed());
+
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(1L, status.getLimitsExceeded());
+  }
+
+
+  /**
+   * Preemption is not counted as a failure; only the preempted counter
+   * moves, and it survives a failure-window reset.
+   */
+  @Test
+  public void testRoleStatusFailedPrempted() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // limits exceeded
+    appState.incFailedContainers(status, ContainerOutcome.Preempted);
+    assertEquals(0, status.getFailed());
+    assertEquals(1L, status.getPreempted());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(0L, status.getDiskFailed());
+
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1L, status.getPreempted());
+  }
+
+
+  /**
+   * A disk failure counts as failed (not failed-recently) and bumps
+   * the disk-failed counter.
+   */
+  @Test
+  public void testRoleStatusFailedNode() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // limits exceeded
+    appState.incFailedContainers(status, ContainerOutcome.Disk_failure);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(0L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(1L, status.getDiskFailed());
+  }
+
+  /** A clean completion leaves all NodeEntry failure counters at zero. */
+  @Test
+  public void testNodeEntryCompleted() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(true, ContainerOutcome.Completed);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+    assertEquals(0, nodeEntry.getActive());
+    assertTrue(nodeEntry.isAvailable());
+  }
+
+  /**
+   * A failed completion increments failed and failed-recently;
+   * resetFailedRecently() clears only the recent counter.
+   */
+  @Test
+  public void testNodeEntryFailed() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Failed);
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(1, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+    assertEquals(0, nodeEntry.getActive());
+    assertTrue(nodeEntry.isAvailable());
+    nodeEntry.resetFailedRecently();
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+  }
+
+  /** Limits-exceeded completions leave all NodeEntry counters at zero. */
+  @Test
+  public void testNodeEntryLimitsExceeded() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome
+        .Failed_limits_exceeded);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+  }
+
+  /** Preemption bumps only the NodeEntry preempted counter. */
+  @Test
+  public void testNodeEntryPreempted() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Preempted);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(1, nodeEntry.getPreempted());
+  }
+
+  /** Disk failure counts as failed and failed-recently on the NodeEntry. */
+  @Test
+  public void testNodeEntryNodeFailure() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Disk_failure);
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(1, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+  }
+
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
new file mode 100644
index 0000000..da2ed0d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Tests of dynamic role creation and the role-history bookkeeping behind it.
+ */
+public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockAppStateDynamicHistory.class);
+
+  /**
+   * Small cluster; args are presumably (nodes, containersPerNode), i.e.
+   * 8 nodes with a single slot each -- TODO confirm against MockYarnEngine.
+   * @return the mock YARN engine used by this test
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(8, 1);
+  }
+
+  @Test
+  public void testDynamicRoleHistory() throws Throwable {
+
+    String dynamic = "dynamicRole";
+    long desired = 1;
+    int placementPolicy = PlacementPolicy.DEFAULT;
+    // snapshot and patch existing spec
+    Application application = appState.getClusterStatus();
+    Component component = new Component().name(dynamic).numberOfContainers(
+        desired);
+    component.getConfiguration().setProperty(ResourceKeys
+        .COMPONENT_PLACEMENT_POLICY, "" + placementPolicy);
+    application.getComponents().add(component);
+
+    // write the definitions
+    List<ProviderRole> updates = appState.updateComponents(
+        Collections.singletonMap(dynamic, desired));
+    assertEquals(1, updates.size());
+    ProviderRole updatedRole = updates.get(0);
+    assertEquals(updatedRole.placementPolicy, placementPolicy);
+
+    // now look at the role map
+    assertNotNull(appState.getRoleMap().get(dynamic));
+    ProviderRole mappedRole = appState.getRoleMap().get(dynamic);
+    int rolePriority = mappedRole.id;
+
+    Map<Integer, ProviderRole> priorityMap = appState.getRolePriorityMap();
+    assertEquals(priorityMap.size(), 4); // presumably 3 built-in mock roles + the new one
+    ProviderRole dynamicProviderRole = priorityMap.get(rolePriority);
+    assertNotNull(dynamicProviderRole);
+    assertEquals(dynamicProviderRole.id, rolePriority);
+
+    assertNotNull(appState.getRoleStatusMap().get(rolePriority));
+    RoleStatus dynamicRoleStatus =
+        appState.getRoleStatusMap().get(rolePriority);
+    assertEquals(dynamicRoleStatus.getDesired(), desired);
+
+
+    // advance the allocator index so the next allocation lands on a
+    // later, known host (index 2), giving a fixed target for the asserts
+    engine.getAllocator().nextIndex();
+
+    int targetNode = 2;
+    assertEquals(targetNode, engine.getAllocator().nextIndex());
+    String targetHostname = engine.getCluster().nodeAt(targetNode)
+        .getHostname();
+
+    // clock is set to a small value
+    appState.setTime(100000);
+
+    // allocate the nodes
+    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+    ContainerRequestOperation action0 = (ContainerRequestOperation)actions
+        .get(0);
+
+    ContainerRequest request = action0.getRequest();
+    assertTrue(SliderUtils.isEmpty(request.getNodes())); // no placement history yet
+
+    List<ContainerId> released = new ArrayList<>();
+    List<RoleInstance> allocations = submitOperations(actions, released);
+    processSubmissionOperations(allocations, new ArrayList<>(), released);
+    assertEquals(1, allocations.size());
+    RoleInstance ri = allocations.get(0);
+
+    assertEquals(ri.role, dynamic);
+    assertEquals(ri.roleId, rolePriority);
+    assertEquals(ri.host, targetHostname);
+
+    // now look at the role history
+
+    RoleHistory roleHistory = appState.getRoleHistory();
+    List<NodeInstance> activeNodes = roleHistory.listActiveNodes(
+        rolePriority);
+    assertEquals(activeNodes.size(), 1);
+    NodeInstance activeNode = activeNodes.get(0);
+    assertNotNull(activeNode.get(rolePriority));
+    NodeEntry entry8 = activeNode.get(rolePriority);
+    assertEquals(entry8.getActive(), 1);
+
+    assertEquals(activeNode.hostname, targetHostname);
+
+    NodeInstance activeNodeInstance =
+        roleHistory.getOrCreateNodeInstance(ri.container);
+
+    assertEquals(activeNode, activeNodeInstance);
+    NodeEntry entry = activeNodeInstance.get(rolePriority);
+    assertNotNull(entry);
+    assertTrue(entry.getActive() > 0);
+    assertTrue(entry.getLive() > 0);
+
+
+    // now trigger a termination event on that role
+
+    // increment time for a long-lived failure event
+    appState.incTime(100000);
+
+    LOG.debug("Triggering failure");
+    ContainerId cid = ri.getContainerId();
+    AppState.NodeCompletionResult result = appState.onCompletedContainer(
+        containerStatus(cid, 1)); // non-zero exit code => container failure
+    assertEquals(result.roleInstance, ri);
+    assertTrue(result.containerFailed);
+
+    roleHistory.dump();
+    // values should have changed
+    assertEquals(1, entry.getFailed());
+    assertEquals(0, entry.getStartFailed());
+    assertEquals(0, entry.getActive());
+    assertEquals(0, entry.getLive());
+
+
+    List<NodeInstance> nodesForRoleId =
+        roleHistory.getRecentNodesForRoleId(rolePriority);
+    assertNotNull(nodesForRoleId);
+
+    // make sure new nodes will default to a different host in the engine
+    assertTrue(targetNode < engine.getAllocator().nextIndex());
+
+    actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+    ContainerRequestOperation action1 = (ContainerRequestOperation) actions
+        .get(0);
+    ContainerRequest request1 = action1.getRequest();
+    assertTrue(SliderUtils.isNotEmpty(request1.getNodes())); // history now supplies hosts
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testRoleHistoryRoleAdditions() throws Throwable {
+    MockRoleHistory roleHistory = new MockRoleHistory(new ArrayList<>());
+    roleHistory.addNewRole(new RoleStatus(new ProviderRole("one", 1)));
+    roleHistory.addNewRole(new RoleStatus(new ProviderRole("two", 1))); // duplicate priority 1 -> BadConfigException
+    roleHistory.dump();
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testRoleHistoryRoleStartupConflict() throws Throwable {
+    MockRoleHistory roleHistory = new MockRoleHistory(Arrays.asList(
+        new ProviderRole("one", 1), new ProviderRole("two", 1) // duplicate priorities rejected at construction
+    ));
+    roleHistory.dump();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java
new file mode 100644
index 0000000..2c695fd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState.NodeCompletionResult;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.model.mock.MockFactory.NODE_FAILURE_THRESHOLD;
+
+/**
+ * Tests of dynamic roles: placement policy, failure thresholds, host reuse.
+ */
+public class TestMockAppStateDynamicRoles extends BaseMockAppStateTest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockAppStateDynamicRoles.class);
+  private static final String ROLE4 = "4";
+  private static final String ROLE5 = "5";
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateDynamicRoles";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a mock engine of 8 nodes with 2 container slots each
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(8, 2);
+  }
+
+  @Override
+  public Application buildApplication() {
+    Application application = super.buildApplication();
+
+    Component component = new Component().name(ROLE4).numberOfContainers(1L);
+    component.getConfiguration().setProperty(ResourceKeys
+        .NODE_FAILURE_THRESHOLD, Integer.toString(3)); // ROLE4: custom node-failure threshold
+    application.getComponents().add(component);
+
+    component = new Component().name(ROLE5).numberOfContainers(1L);
+    component.getConfiguration().setProperty(ResourceKeys
+        .COMPONENT_PLACEMENT_POLICY, Integer.toString(PlacementPolicy.STRICT)); // ROLE5: strict placement
+    application.getComponents().add(component);
+
+    return application;
+  }
+
+  @Test
+  public void testAllocateReleaseRealloc() throws Throwable {
+
+    createAndStartNodes(); // smoke test: allocate, review and dump without errors
+    appState.reviewRequestAndReleaseNodes();
+    appState.getRoleHistory().dump();
+  }
+
+  /**
+   * Find all allocations for a specific role.
+   * @param role role Id/priority
+   * @param actions source list
+   * @return found list
+   */
+  List<ContainerRequestOperation> findAllocationsForRole(int role,
+      List<AbstractRMOperation> actions) {
+    List<ContainerRequestOperation> ops = new ArrayList<>();
+    for (AbstractRMOperation op : actions) {
+      if (op instanceof ContainerRequestOperation && role ==
+          ContainerPriority.extractRole(((ContainerRequestOperation) op)
+              .getRequest().getPriority())) {
+        ops.add((ContainerRequestOperation) op);
+      }
+    }
+    return ops;
+  }
+
+  @Test
+  public void testStrictPlacementInitialRequest() throws Throwable {
+    LOG.info("Initial engine state = {}", engine);
+    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(2, actions.size());
+
+    // neither have locality at this point
+    assertRelaxLocalityFlag(appState.lookupRoleStatus(ROLE4).getKey(), null,
+        true, actions);
+    assertRelaxLocalityFlag(appState.lookupRoleStatus(ROLE5).getKey(), null,
+        true, actions);
+  }
+
+  @Test
+  public void testPolicyPropagation() throws Throwable {
+    assertEquals(0, (appState.lookupRoleStatus(ROLE4).getPlacementPolicy() &
+        PlacementPolicy.STRICT)); // STRICT bit clear on ROLE4
+    assertNotEquals(0, (appState.lookupRoleStatus(ROLE5).getPlacementPolicy() &
+        PlacementPolicy.STRICT)); // STRICT bit set on ROLE5
+
+  }
+
+  @Test
+  public void testNodeFailureThresholdPropagation() throws Throwable {
+    assertEquals(3, appState.lookupRoleStatus(ROLE4).getNodeFailureThreshold());
+    assertEquals(NODE_FAILURE_THRESHOLD, appState.lookupRoleStatus(ROLE5)
+        .getNodeFailureThreshold()); // ROLE5 keeps the default threshold
+  }
+
+  @Test
+  public void testLaxPlacementSecondRequestRole4() throws Throwable {
+    LOG.info("Initial engine state = {}", engine);
+    RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
+    RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
+    role4.setDesired(1);
+    role5.setDesired(0);
+
+    List<RoleInstance> instances = createStartAndStopNodes(new ArrayList<>());
+    assertEquals(1, instances.size());
+
+    int id = appState.lookupRoleStatus(ROLE4).getKey();
+    RoleInstance instanceA = null;
+    for (RoleInstance instance : instances) {
+      if (instance.roleId == id) {
+        instanceA = instance;
+      }
+    }
+    assertNotNull(instanceA);
+    String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);
+
+    LOG.info("Allocated engine state = {}", engine);
+    assertEquals(1, engine.containerCount());
+
+    assertEquals(1, role4.getRunning());
+    // shrinking cluster
+
+    role4.setDesired(0);
+    appState.lookupRoleStatus(ROLE4).setDesired(0); // NOTE(review): redundant -- same RoleStatus as role4 above
+    List<NodeCompletionResult> completionResults = new ArrayList<>();
+    createStartAndStopNodes(completionResults);
+    assertEquals(0, engine.containerCount());
+    assertEquals(1, completionResults.size());
+
+    // expanding again: the request should now name the previously used host
+    role4.setDesired(1);
+    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+
+    ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
+    List<String> nodes = cro.getRequest().getNodes();
+    assertEquals(1, nodes.size());
+    assertEquals(hostname, nodes.get(0));
+  }
+
+  @Test
+  public void testStrictPlacementSecondRequestRole5() throws Throwable {
+    LOG.info("Initial engine state = {}", engine);
+    RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
+    RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
+    role4.setDesired(0);
+    role5.setDesired(1);
+
+    List<RoleInstance> instances = createStartAndStopNodes(new ArrayList<>());
+    assertEquals(1, instances.size());
+
+    int id = appState.lookupRoleStatus(ROLE5).getKey();
+    RoleInstance instanceA = null;
+    for (RoleInstance instance : instances) {
+      if (instance.roleId == id) {
+        instanceA = instance;
+      }
+    }
+    assertNotNull(instanceA);
+    String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);
+
+    LOG.info("Allocated engine state = {}", engine);
+    assertEquals(1, engine.containerCount());
+
+    assertEquals(1, role5.getRunning());
+
+    // shrinking cluster
+    role5.setDesired(0);
+    List<NodeCompletionResult> completionResults = new ArrayList<>();
+    createStartAndStopNodes(completionResults);
+    assertEquals(0, engine.containerCount());
+    assertEquals(1, completionResults.size());
+    assertEquals(0, role5.getRunning());
+
+    role5.setDesired(1);
+    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+    assertRelaxLocalityFlag(id, "", false, actions); // strict placement: relaxLocality must be off
+    ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
+    List<String> nodes = cro.getRequest().getNodes();
+    assertEquals(1, nodes.size());
+    assertEquals(hostname, nodes.get(0));
+  }
+
+  public void assertRelaxLocalityFlag(
+      int role,
+      String expectedHost, // NOTE(review): parameter is never used -- TODO assert against it or remove
+      boolean expectedRelaxFlag,
+      List<AbstractRMOperation> actions) {
+    List<ContainerRequestOperation> requests = findAllocationsForRole(
+        role, actions);
+    assertEquals(1, requests.size());
+    ContainerRequestOperation req = requests.get(0);
+    assertEquals(expectedRelaxFlag, req.getRequest().getRelaxLocality());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
new file mode 100644
index 0000000..01bf9bd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.avro.LoadedRoleHistory;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.Collections;
+
+/**
+ * Tests of flexing dynamically added roles, plus saving and reloading
+ * role history across flex operations.
+ */
+public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockAppStateFlexDynamicRoles.class);
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateFlexDynamicRoles";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a mock engine of 4 nodes with 4 container slots each
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(4, 4);
+  }
+
+  @Override
+  public AppStateBindingInfo buildBindingInfo() {
+    AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+    bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector(); // release newest containers first
+    return bindingInfo;
+  }
+
+  @Override
+  public Application buildApplication() {
+    Application application = super.buildApplication();
+    Component component = new Component().name("dynamic-6")
+        .numberOfContainers(1L);
+    application.getComponents().add(component); // extra dynamic role available to every test
+
+    return application;
+  }
+
+  @Before
+  public void init()
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    createAndStartNodes(); // bring the initial cluster up before each test
+  }
+
+  @Test
+  public void testDynamicFlexAddRole() throws Throwable {
+    Application application = appState.getClusterStatus();
+    Component component = new Component().name("dynamicAdd7")
+        .numberOfContainers(1L);
+    application.getComponents().add(component);
+    appState.updateComponents(Collections.singletonMap(component.getName(),
+        component.getNumberOfContainers()));
+    createAndStartNodes();
+    dumpClusterDescription("updated CD", appState.getClusterStatus());
+    appState.lookupRoleStatus("dynamicAdd7"); // throws if the role was not created
+  }
+
+  @Test
+  public void testDynamicFlexDropRole() throws Throwable {
+    appState.updateComponents(Collections.singletonMap("dynamic-6", 0L));
+
+    Application getCD = appState.getClusterStatus();
+    dumpClusterDescription("updated CD", getCD);
+    // role status is retained even after the component is flexed to zero
+    appState.lookupRoleStatus("dynamic-6");
+  }
+
+
+  @Test
+  public void testHistorySaveFlexLoad() throws Throwable {
+    Application application = appState.getClusterStatus();
+    RoleHistory roleHistory = appState.getRoleHistory();
+    Path history = roleHistory.saveHistory(0x0001);
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    Component component = new Component().name("HistorySaveFlexLoad")
+        .numberOfContainers(1L);
+    application.getComponents().add(component);
+
+    appState.updateComponents(Collections.singletonMap(component.getName(),
+        component.getNumberOfContainers()));
+    createAndStartNodes();
+    LoadedRoleHistory loadedRoleHistory =
+        historyWriter.read(fs, history);
+    assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory)); // presumably 0 == no failed entries; TODO confirm rebuild() contract
+  }
+
+  @Test
+  public void testHistoryFlexSaveResetLoad() throws Throwable {
+    Application application = appState.getClusterStatus();
+    Component component = new Component().name("HistoryFlexSaveLoad")
+        .numberOfContainers(1L);
+    application.getComponents().add(component);
+
+    appState.updateComponents(Collections.singletonMap(component.getName(),
+        component.getNumberOfContainers()));
+    createAndStartNodes();
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    RoleHistory roleHistory = appState.getRoleHistory();
+    Path history = roleHistory.saveHistory(0x0002);
+    // now reset the app state into a fresh MockAppState
+    File historyWorkDir2 = new File("target/history" + getTestName() +
+        "-0002");
+    Path historyPath2 = new Path(historyWorkDir2.toURI());
+    appState = new MockAppState();
+    AppStateBindingInfo binding2 = buildBindingInfo();
+    binding2.application = factory.newApplication(0, 0, 0)
+        .name(getTestName());
+    binding2.historyPath = historyPath2;
+    appState.buildInstance(binding2);
+    // on this read there won't be the right number of roles
+    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+    assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java
new file mode 100644
index 0000000..9b5e532
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tests of flexing the application up and down at different lifecycle phases.
+ */
+public class TestMockAppStateFlexing extends BaseMockAppStateTest implements
+    MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaseMockAppStateTest.class); // NOTE(review): likely should be TestMockAppStateFlexing.class
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateFlexing";
+  }
+
+  @Test
+  public void testFlexDuringLaunchPhase() throws Throwable {
+
+    // ask for one instance of role0
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+
+    // at this point there's now one request in the list
+    assertEquals(1, ops.size());
+    // and in a liveness check, one outstanding
+    ApplicationLivenessInformation liveness =
+        appState.getApplicationLivenessInformation();
+    assertEquals(1, liveness.requestsOutstanding);
+    assertFalse(liveness.allRequestsSatisfied);
+
+    List<Container> allocations = engine.execute(ops);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> releases = new ArrayList<>();
+    appState.onContainersAllocated(allocations, assignments, releases);
+    assertEquals(1, assignments.size());
+    ContainerAssignment assigned = assignments.get(0);
+    Container target = assigned.container;
+    RoleInstance ri = roleInstance(assigned);
+
+    ops = appState.reviewRequestAndReleaseNodes(); // allocation satisfied: no further ops expected
+    assertTrue(ops.isEmpty());
+
+    liveness = appState.getApplicationLivenessInformation();
+    assertEquals(0, liveness.requestsOutstanding);
+    assertTrue(liveness.allRequestsSatisfied);
+
+    //now this is the start point.
+    appState.containerStartSubmitted(target, ri);
+
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertTrue(ops.isEmpty());
+
+    appState.innerOnNodeManagerContainerStarted(target.getId()); // complete the launch cycle
+  }
+
+  @Test
+  public void testFlexBeforeAllocationPhase() throws Throwable {
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertFalse(ops.isEmpty());
+
+    // second scan will find the first run outstanding, so not re-issue
+    // any more container requests
+    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+    assertTrue(ops2.isEmpty());
+
+    // and in a liveness check, one outstanding
+    ApplicationLivenessInformation liveness = appState
+        .getApplicationLivenessInformation();
+    assertEquals(1, liveness.requestsOutstanding);
+    assertFalse(liveness.allRequestsSatisfied);
+
+    appState.refreshClusterStatus();
+    Application application = appState.getClusterStatus();
+    // TODO cluster status returns liveness info
+//    assertEquals(1, cd.liveness.requestsOutstanding);
+
+  }
+
+
+  @Test
+  public void testFlexDownTwice() throws Throwable {
+    int r0 = 6;
+    int r1 = 0;
+    int r2 = 0;
+    getRole0Status().setDesired(r0);
+    getRole1Status().setDesired(r1);
+    getRole2Status().setDesired(r2);
+    List<RoleInstance> instances = createAndStartNodes();
+
+    int clusterSize = r0 + r1 + r2;
+    assertEquals(instances.size(), clusterSize);
+    LOG.info("shrinking cluster");
+    r0 = 4;
+    getRole0Status().setDesired(r0);
+    List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
+    instances = createStartAndStopNodes(completionResults);
+    assertEquals(0, instances.size());
+    // assert two nodes were released
+    assertEquals(2, completionResults.size());
+
+    // no-op review
+    completionResults = new ArrayList<>();
+    instances = createStartAndStopNodes(completionResults);
+    assertEquals(0, instances.size());
+    // nothing further should have been released
+    assertEquals(0, completionResults.size());
+
+
+    // now shrink again
+    getRole0Status().setDesired(1);
+    completionResults = new ArrayList<>();
+    instances = createStartAndStopNodes(completionResults);
+    assertEquals(0, instances.size());
+    // three more nodes released (4 -> 1)
+    assertEquals(3, completionResults.size());
+
+  }
+
+  @Test
+  public void testFlexNegative() throws Throwable {
+    int r0 = 6;
+    int r1 = 0;
+    int r2 = 0;
+    getRole0Status().setDesired(r0);
+    getRole1Status().setDesired(r1);
+    getRole2Status().setDesired(r2);
+    List<RoleInstance> instances = createAndStartNodes();
+
+    int clusterSize = r0 + r1 + r2;
+    assertEquals(instances.size(), clusterSize);
+    LOG.info("shrinking cluster");
+    getRole0Status().setDesired(-2);
+    List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
+    try {
+      createStartAndStopNodes(completionResults);
+      fail("expected an exception");
+    } catch (TriggerClusterTeardownException e) { // expected: negative desired count triggers teardown
+    }
+
+  }
+
+  @Test
+  public void testCancelWithRequestsOutstanding() throws Throwable {
+    // flex cluster size before the original set were allocated
+
+
+    getRole0Status().setDesired(6);
+    // build the ops
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    // here the data structures exist
+
+    // go down
+    getRole0Status().setDesired(3);
+    List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+    assertEquals(3, ops2.size()); // 6 -> 3: three outstanding requests must be cancelled
+    for (AbstractRMOperation op : ops2) {
+      assertTrue(op instanceof CancelSingleRequest);
+    }
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
new file mode 100644
index 0000000..2d87be6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.state.ContainerPriority.buildPriority;
+import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
+
+/**
+ * Test app state RM operations.
+ */
+public class TestMockAppStateRMOperations extends BaseMockAppStateTest
+    implements MockRoles {
+  // NOTE(review): logger is keyed to BaseMockAppStateTest.class rather than
+  // this class — probably a copy/paste slip; log lines will carry the wrong name.
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaseMockAppStateTest.class);
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateRMOperations";
+  }
+
+  /** Role id must survive a build/extract round trip of the priority field. */
+  @Test
+  public void testPriorityOnly() throws Throwable {
+    assertEquals(5, extractRole(buildPriority(5, false)));
+  }
+
+  // NOTE(review): byte-identical to testPriorityOnly — likely a leftover from
+  // the Groovy-to-Java port; consider deleting or varying the inputs.
+  @Test
+  public void testPriorityRoundTrip() throws Throwable {
+    assertEquals(5, extractRole(buildPriority(5, false)));
+  }
+
+  /** Same round trip, via an intermediate priority variable. */
+  @Test
+  public void testPriorityRoundTripWithRequest() throws Throwable {
+    int priority = buildPriority(5, false);
+    assertEquals(5, extractRole(priority));
+  }
+
+  /**
+   * One desired container yields exactly one container-request operation
+   * whose priority encodes role 0; the mock handler records it as such.
+   */
+  @Test
+  public void testMockAddOp() throws Throwable {
+    getRole0Status().setDesired(1);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 1);
+    ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
+    int priority = operation.getRequest().getPriority().getPriority();
+    assertEquals(extractRole(priority), getRole0Status().getKey());
+    MockRMOperationHandler handler = new MockRMOperationHandler();
+    handler.execute(ops);
+
+    AbstractRMOperation op = handler.getFirstOp();
+    assertTrue(op instanceof ContainerRequestOperation);
+  }
+
+  /**
+   * Test of a flex up and down op which verifies that outstanding
+   * requests are cancelled first.
+   * <ol>
+   *   <li>request 5 nodes, assert 5 request made</li>
+   *   <li>allocate 1 of them</li>
+   *   <li>flex cluster size to 3</li>
+   *   <li>assert this generates 2 cancel requests</li>
+   * </ol>
+   */
+  @Test
+  public void testRequestThenCancelOps() throws Throwable {
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(5);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 5);
+    // now 5 outstanding requests.
+    assertEquals(5, role0.getRequested());
+
+    // allocate one
+    List<AbstractRMOperation> processed = new ArrayList<>();
+    processed.add(ops.get(0));
+    List<ContainerId> released = new ArrayList<>();
+    List<AppState.NodeCompletionResult> completionResults = new ArrayList<>();
+    submitOperations(processed, released);
+    List<RoleInstance> instances = createAndSubmitNodes(released);
+    processSubmissionOperations(instances, completionResults, released);
+
+
+    // four outstanding
+    assertEquals(4, role0.getRequested());
+
+    // flex cluster to 3
+    role0.setDesired(3);
+    ops = appState.reviewRequestAndReleaseNodes();
+
+    // expect two cancel operation from review
+    assertListLength(ops, 2);
+    for (AbstractRMOperation op : ops) {
+      assertTrue(op instanceof CancelSingleRequest);
+    }
+
+    MockRMOperationHandler handler = new MockRMOperationHandler();
+    handler.setAvailableToCancel(4);
+    handler.execute(ops);
+    assertEquals(2, handler.getAvailableToCancel());
+    assertEquals(2, role0.getRequested());
+
+    // flex down one more
+    role0.setDesired(2);
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 1);
+    for (AbstractRMOperation op : ops) {
+      assertTrue(op instanceof CancelSingleRequest);
+    }
+    handler.execute(ops);
+    assertEquals(1, handler.getAvailableToCancel());
+    assertEquals(1, role0.getRequested());
+  }
+
+  /**
+   * With nothing allocated, flexing 5 -> 0 must emit one operation per
+   * outstanding request (five cancellations).
+   */
+  @Test
+  public void testCancelNoActualContainers() throws Throwable {
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(5);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 5);
+    // now 5 outstanding requests.
+    assertEquals(5, role0.getRequested());
+    role0.setDesired(0);
+    ops = appState.reviewRequestAndReleaseNodes();
+    // flexing down to zero cancels all five outstanding requests
+    assertListLength(ops, 5);
+
+  }
+
+
+  @Test
+  public void testFlexDownOutstandingRequests() throws Throwable {
+    // engine only has two nodes, so > 2 will be outstanding
+    engine = new MockYarnEngine(1, 2);
+    List<AbstractRMOperation> ops;
+    // desired=4 on a two-slot engine: 2 containers run, 2 requests stay outstanding
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(4);
+    createAndSubmitNodes();
+
+    assertEquals(2, role0.getRequested());
+    assertEquals(2, role0.getRunning());
+    // there are now two outstanding, two actual
+    // Release 3 and verify that the two
+    // cancellations were combined with a release
+    role0.setDesired(1);
+    assertEquals(-3, role0.getDelta());
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 3);
+    int numCancel = 0;
+    int numRelease = 0;
+    for (AbstractRMOperation op : ops) {
+      if (op instanceof CancelSingleRequest) {
+        numCancel++;
+      }
+      if (op instanceof ContainerReleaseOperation) {
+        numRelease++;
+      }
+    }
+    assertEquals(2, numCancel);
+    assertEquals(1, numRelease);
+    assertEquals(0, role0.getRequested());
+    // TODO releasing?
+//    assertEquals(1, role0.getReleasing());
+  }
+
+  @Test
+  public void testCancelAllOutstandingRequests() throws Throwable {
+
+    // request two containers; nothing is allocated, so both remain outstanding
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(2);
+    List<AbstractRMOperation> ops;
+    ops = appState.reviewRequestAndReleaseNodes();
+    int count = 0;
+    for (AbstractRMOperation op : ops) {
+      if (op instanceof ContainerRequestOperation) {
+        count++;
+      }
+    }
+    assertEquals(2, count);
+
+    // flex to zero: both outstanding requests must be cancelled,
+    // with no container releases (nothing is running)
+    role0.setDesired(0);
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(2, ops.size());
+
+    for (AbstractRMOperation op : ops) {
+      assertTrue(op instanceof CancelSingleRequest);
+    }
+  }
+
+
+  @Test
+  public void testFlexUpOutstandingRequests() throws Throwable {
+
+    List<AbstractRMOperation> ops;
+    // simulate: desired=2 with one container running and one request outstanding
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(2);
+    appState.incRunningContainers(role0);
+    appState.incRequestedContainers(role0);
+
+    // flex up 2 nodes, yet expect only one node to be requested,
+    // as the outstanding request is taken into account
+    role0.setDesired(4);
+    appState.incRequestedContainers(role0);
+
+    assertEquals(1, role0.getRunning());
+    assertEquals(2, role0.getRequested());
+    assertEquals(3, role0.getActualAndRequested());
+    assertEquals(1, role0.getDelta());
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 1);
+    assertTrue(ops.get(0) instanceof ContainerRequestOperation);
+    assertEquals(3, role0.getRequested());
+  }
+
+  @Test
+  public void testFlexUpNoSpace() throws Throwable {
+    // engine only has two nodes, so > 2 will be outstanding
+    engine = new MockYarnEngine(1, 2);
+    // desired=4 on a two-slot engine: 2 containers run, 2 requests stay outstanding
+    RoleStatus role0 = getRole0Status();
+    role0.setDesired(4);
+    createAndSubmitNodes();
+
+    assertEquals(2, role0.getRequested());
+    assertEquals(2, role0.getRunning());
+    role0.setDesired(8);
+    assertEquals(4, role0.getDelta());
+    createAndSubmitNodes();
+    // four new requests on top of the two already outstanding
+    assertEquals(6, role0.getRequested());
+  }
+
+
+  /**
+   * Full allocate/start/release cycle for a single container: the request is
+   * satisfied, the container is started, then flexing to zero releases it.
+   */
+  @Test
+  public void testAllocateReleaseOp() throws Throwable {
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
+    AMRMClient.ContainerRequest request = operation.getRequest();
+    Container cont = engine.allocateContainer(request);
+    List<Container> allocated = new ArrayList<>();
+    allocated.add(cont);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> operations = new ArrayList<>();
+    appState.onContainersAllocated(allocated, assignments, operations);
+
+    assertListLength(ops, 1);
+    assertListLength(assignments, 1);
+    ContainerAssignment assigned = assignments.get(0);
+    Container target = assigned.container;
+    assertEquals(target.getId(), cont.getId());
+    int roleId = assigned.role.getPriority();
+    assertEquals(roleId, extractRole(request.getPriority()));
+    assertEquals(assigned.role.getName(), ROLE0);
+    RoleInstance ri = roleInstance(assigned);
+    //tell the app it arrived
+    appState.containerStartSubmitted(target, ri);
+    appState.innerOnNodeManagerContainerStarted(target.getId());
+    assertEquals(1, getRole0Status().getRunning());
+
+    //now release it by changing the role status
+    getRole0Status().setDesired(0);
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 1);
+
+    assertTrue(ops.get(0) instanceof ContainerReleaseOperation);
+    ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
+    assertEquals(release.getContainerId(), cont.getId());
+  }
+
+  /**
+   * Multi-role allocation: each satisfied request is expected to produce a
+   * CancelSingleRequest (cancelling the now-met outstanding request) plus an
+   * assignment; flexing role 1 to zero then releases its three containers.
+   */
+  @Test
+  public void testComplexAllocation() throws Throwable {
+    getRole0Status().setDesired(1);
+    getRole1Status().setDesired(3);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    List<Container> allocations = engine.execute(ops);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> releases = new ArrayList<>();
+    appState.onContainersAllocated(allocations, assignments, releases);
+    // we expect four release requests here for all the allocated containers
+    assertListLength(releases, 4);
+    for (AbstractRMOperation op : releases) {
+      assertTrue(op instanceof CancelSingleRequest);
+    }
+    assertListLength(assignments, 4);
+    for (ContainerAssignment assigned : assignments) {
+      Container target = assigned.container;
+      RoleInstance ri = roleInstance(assigned);
+      appState.containerStartSubmitted(target, ri);
+    }
+    //insert some async operation here
+    for (ContainerAssignment assigned : assignments) {
+      Container target = assigned.container;
+      appState.innerOnNodeManagerContainerStarted(target.getId());
+    }
+    assertEquals(4, engine.containerCount());
+    getRole1Status().setDesired(0);
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 3);
+    allocations = engine.execute(ops);
+    assertEquals(1, engine.containerCount());
+
+    appState.onContainersAllocated(allocations, assignments, releases);
+    assertTrue(assignments.isEmpty());
+    assertTrue(releases.isEmpty());
+  }
+
+  /**
+   * A duplicate container-started event must fail via the inner (strict)
+   * callback, while the outer callback degrades to returning null.
+   */
+  @Test
+  public void testDoubleNodeManagerStartEvent() throws Throwable {
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    List<Container> allocations = engine.execute(ops);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> releases = new ArrayList<>();
+    appState.onContainersAllocated(allocations, assignments, releases);
+    assertListLength(assignments, 1);
+    ContainerAssignment assigned = assignments.get(0);
+    Container target = assigned.container;
+    RoleInstance ri = roleInstance(assigned);
+    appState.containerStartSubmitted(target, ri);
+    RoleInstance ri2 = appState.innerOnNodeManagerContainerStarted(target
+        .getId());
+    assertEquals(ri2, ri);
+    //try a second time, expect an error
+    try {
+      appState.innerOnNodeManagerContainerStarted(target.getId());
+      fail("Expected an exception");
+    } catch (RuntimeException expected) {
+      // expected
+    }
+    //and non-faulter should not downgrade to a null
+    LOG.warn("Ignore any exception/stack trace that appears below");
+    LOG.warn("===============================================================");
+    RoleInstance ri3 = appState.onNodeManagerContainerStarted(target.getId());
+    LOG.warn("===============================================================");
+    LOG.warn("Ignore any exception/stack trace that appeared above");
+    assertNull(ri3);
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e56c2281
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e56c2281
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e56c2281

Branch: refs/heads/yarn-native-services
Commit: e56c22815a5ba8b7b50c5b53a6166c8971590f01
Parents: c6142f2
Author: Billie Rinaldi <bi...@apache.org>
Authored: Tue Feb 28 14:45:20 2017 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:24 2017 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |   4 +-
 .../yarn/services/api/ApplicationApi.java       |   2 +-
 .../api/impl/ApplicationApiService.java         |  20 +-
 .../yarn/services/resource/Application.java     | 453 -------------------
 .../services/resource/ApplicationState.java     |  30 --
 .../services/resource/ApplicationStatus.java    | 145 ------
 .../hadoop/yarn/services/resource/Artifact.java | 157 -------
 .../yarn/services/resource/BaseResource.java    |  48 --
 .../yarn/services/resource/Component.java       | 381 ----------------
 .../yarn/services/resource/ConfigFile.java      | 192 --------
 .../yarn/services/resource/Configuration.java   | 149 ------
 .../yarn/services/resource/Container.java       | 294 ------------
 .../yarn/services/resource/ContainerState.java  |  25 -
 .../hadoop/yarn/services/resource/Error.java    | 125 -----
 .../yarn/services/resource/PlacementPolicy.java |  99 ----
 .../yarn/services/resource/ReadinessCheck.java  | 163 -------
 .../hadoop/yarn/services/resource/Resource.java | 149 ------
 .../src/main/webapp/WEB-INF/web.xml             |   2 +-
 .../api/impl/TestApplicationApiService.java     |   6 +-
 .../hadoop-yarn-slider-core/pom.xml             |   5 +
 .../apache/slider/api/resource/Application.java | 449 ++++++++++++++++++
 .../slider/api/resource/ApplicationState.java   |  30 ++
 .../slider/api/resource/ApplicationStatus.java  | 145 ++++++
 .../apache/slider/api/resource/Artifact.java    | 157 +++++++
 .../slider/api/resource/BaseResource.java       |  48 ++
 .../apache/slider/api/resource/Component.java   | 381 ++++++++++++++++
 .../apache/slider/api/resource/ConfigFile.java  | 192 ++++++++
 .../slider/api/resource/Configuration.java      | 149 ++++++
 .../apache/slider/api/resource/Container.java   | 294 ++++++++++++
 .../slider/api/resource/ContainerState.java     |  25 +
 .../org/apache/slider/api/resource/Error.java   | 125 +++++
 .../slider/api/resource/PlacementPolicy.java    |  99 ++++
 .../slider/api/resource/ReadinessCheck.java     | 163 +++++++
 .../apache/slider/api/resource/Resource.java    | 149 ++++++
 34 files changed, 2427 insertions(+), 2428 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
index 2843338..b89146a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
@@ -16,7 +16,5 @@
    limitations under the License.
 -->
 <FindBugsFilter>
-    <Match>
-        <Package name="org.apache.hadoop.yarn.services.resource" />
-    </Match>
+
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
index 654413c..0fb6402 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.services.api;
 
 import javax.ws.rs.core.Response;
 
-import org.apache.hadoop.yarn.services.resource.Application;
+import org.apache.slider.api.resource.Application;
 
 /**
  * Apache Hadoop YARN Services REST API interface.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 7028caa..5a4726e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -57,16 +57,16 @@ import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.services.api.ApplicationApi;
-import org.apache.hadoop.yarn.services.resource.Application;
-import org.apache.hadoop.yarn.services.resource.ApplicationState;
-import org.apache.hadoop.yarn.services.resource.ApplicationStatus;
-import org.apache.hadoop.yarn.services.resource.Artifact;
-import org.apache.hadoop.yarn.services.resource.Component;
-import org.apache.hadoop.yarn.services.resource.ConfigFile;
-import org.apache.hadoop.yarn.services.resource.Configuration;
-import org.apache.hadoop.yarn.services.resource.Container;
-import org.apache.hadoop.yarn.services.resource.ContainerState;
-import org.apache.hadoop.yarn.services.resource.Resource;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ApplicationState;
+import org.apache.slider.api.resource.ApplicationStatus;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.api.resource.Configuration;
+import org.apache.slider.api.resource.Container;
+import org.apache.slider.api.resource.ContainerState;
+import org.apache.slider.api.resource.Resource;
 import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.StateValues;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
deleted file mode 100644
index beeffba..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.slider.providers.PlacementPolicy;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonPropertyOrder;
-
-/**
- * An Application resource has the following attributes.
- **/
-
-@ApiModel(description = "An Application resource has the following attributes.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@XmlRootElement
-@JsonInclude(JsonInclude.Include.NON_NULL)
-@JsonPropertyOrder({ "name", "state", "resource", "number_of_containers",
-    "lifetime", "containers" })
-public class Application extends BaseResource {
-  private static final long serialVersionUID = -4491694636566094885L;
-
-  private String name = null;
-  private String id = null;
-  private Artifact artifact = null;
-  private Resource resource = null;
-  private String launchCommand = null;
-  private Date launchTime = null;
-  private Long numberOfContainers = null;
-  private Long numberOfRunningContainers = null;
-  private Long lifetime = null;
-  private PlacementPolicy placementPolicy = null;
-  private List<Component> components = null;
-  private Configuration configuration = null;
-  private List<Container> containers = new ArrayList<>();
-  private ApplicationState state = null;
-  private Map<String, String> quicklinks = null;
-  private String queue = null;
-
-  /**
-   * A unique application name.
-   **/
-  public Application name(String name) {
-    this.name = name;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", required = true, value = "A unique application name.")
-  @JsonProperty("name")
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  /**
-   * A unique application id.
-   **/
-  public Application id(String id) {
-    this.id = id;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A unique application id.")
-  @JsonProperty("id")
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-
-  /**
-   * Artifact of single-component applications. Mandatory if components
-   * attribute is not specified.
-   **/
-  public Application artifact(Artifact artifact) {
-    this.artifact = artifact;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact of single-component applications. Mandatory if components attribute is not specified.")
-  @JsonProperty("artifact")
-  public Artifact getArtifact() {
-    return artifact;
-  }
-
-  public void setArtifact(Artifact artifact) {
-    this.artifact = artifact;
-  }
-
-  /**
-   * Resource of single-component applications or the global default for
-   * multi-component applications. Mandatory if it is a single-component
-   * application and if cpus and memory are not specified at the Application
-   * level.
-   **/
-  public Application resource(Resource resource) {
-    this.resource = resource;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Resource of single-component applications or the global default for multi-component applications. Mandatory if it is a single-component application and if cpus and memory are not specified at the Application level.")
-  @JsonProperty("resource")
-  public Resource getResource() {
-    return resource;
-  }
-
-  public void setResource(Resource resource) {
-    this.resource = resource;
-  }
-
-  /**
-   * The custom launch command of an application component (optional). If not
-   * specified for applications with docker images say, it will default to the
-   * default start command of the image. If there is a single component in this
-   * application, you can specify this without the need to have a 'components'
-   * section.
-   **/
-  public Application launchCommand(String launchCommand) {
-    this.launchCommand = launchCommand;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The custom launch command of an application component (optional). If not specified for applications with docker images say, it will default to the default start command of the image. If there is a single component in this application, you can specify this without the need to have a 'components' section.")
-  @JsonProperty("launch_command")
-  public String getLaunchCommand() {
-    return launchCommand;
-  }
-
-  @XmlElement(name = "launch_command")
-  public void setLaunchCommand(String launchCommand) {
-    this.launchCommand = launchCommand;
-  }
-
-  /**
-   * The time when the application was created, e.g. 2016-03-16T01:01:49.000Z.
-   **/
-  public Application launchTime(Date launchTime) {
-    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The time when the application was created, e.g. 2016-03-16T01:01:49.000Z.")
-  @JsonProperty("launch_time")
-  public Date getLaunchTime() {
-    return launchTime == null ? null : (Date) launchTime.clone();
-  }
-
-  @XmlElement(name = "launch_time")
-  public void setLaunchTime(Date launchTime) {
-    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
-  }
-
-  /**
-   * Number of containers for each app-component in the application. Each
-   * app-component can further override this app-level global default.
-   **/
-  public Application numberOfContainers(Long numberOfContainers) {
-    this.numberOfContainers = numberOfContainers;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Number of containers for each app-component in the application. Each app-component can further override this app-level global default.")
-  @JsonProperty("number_of_containers")
-  public Long getNumberOfContainers() {
-    return numberOfContainers;
-  }
-
-  @XmlElement(name = "number_of_containers")
-  public void setNumberOfContainers(Long numberOfContainers) {
-    this.numberOfContainers = numberOfContainers;
-  }
-
-  /**
-   * In get response this provides the total number of running containers for
-   * this application (across all components) at the time of request. Note, a
-   * subsequent request can return a different number as and when more
-   * containers get allocated until it reaches the total number of containers or
-   * if a flex request has been made between the two requests.
-   **/
-  public Application numberOfRunningContainers(Long numberOfRunningContainers) {
-    this.numberOfRunningContainers = numberOfRunningContainers;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "In get response this provides the total number of running containers for this application (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.")
-  @JsonProperty("number_of_running_containers")
-  public Long getNumberOfRunningContainers() {
-    return numberOfRunningContainers;
-  }
-
-  @XmlElement(name = "number_of_running_containers")
-  public void setNumberOfRunningContainers(Long numberOfRunningContainers) {
-    this.numberOfRunningContainers = numberOfRunningContainers;
-  }
-
-  /**
-   * Life time (in seconds) of the application from the time it reaches the
-   * STARTED state (after which it is automatically destroyed by YARN). For
-   * unlimited lifetime do not set a lifetime value.
-   **/
-  public Application lifetime(Long lifetime) {
-    this.lifetime = lifetime;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.")
-  @JsonProperty("lifetime")
-  public Long getLifetime() {
-    return lifetime;
-  }
-
-  public void setLifetime(Long lifetime) {
-    this.lifetime = lifetime;
-  }
-
-  /**
-   * Advanced scheduling and placement policies (optional). If not specified, it
-   * defaults to the default placement policy of the app owner. The design of
-   * placement policies are in the works. It is not very clear at this point,
-   * how policies in conjunction with labels be exposed to application owners.
-   * This is a placeholder for now. The advanced structure of this attribute
-   * will be determined by YARN-4902.
-   **/
-  public Application placementPolicy(PlacementPolicy placementPolicy) {
-    this.placementPolicy = placementPolicy;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies (optional). If not specified, it defaults to the default placement policy of the app owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to application owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.")
-  @JsonProperty("placement_policy")
-  public PlacementPolicy getPlacementPolicy() {
-    return placementPolicy;
-  }
-
-  @XmlElement(name = "placement_policy")
-  public void setPlacementPolicy(PlacementPolicy placementPolicy) {
-    this.placementPolicy = placementPolicy;
-  }
-
-  /**
-   * Components of an application.
-   **/
-  public Application components(List<Component> components) {
-    this.components = components;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Components of an application.")
-  @JsonProperty("components")
-  public List<Component> getComponents() {
-    return components;
-  }
-
-  public void setComponents(List<Component> components) {
-    this.components = components;
-  }
-
-  /**
-   * Config properties of an application. Configurations provided at the
-   * application/global level are available to all the components. Specific
-   * properties can be overridden at the component level.
-   **/
-  public Application configuration(Configuration configuration) {
-    this.configuration = configuration;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Config properties of an application. Configurations provided at the application/global level are available to all the components. Specific properties can be overridden at the component level.")
-  @JsonProperty("configuration")
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  /**
-   * Containers of a started application. Specifying a value for this attribute
-   * for the POST payload raises a validation error. This blob is available only
-   * in the GET response of a started application.
-   **/
-  public Application containers(List<Container> containers) {
-    this.containers = containers;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Containers of a started application. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started application.")
-  @JsonProperty("containers")
-  public List<Container> getContainers() {
-    return containers;
-  }
-
-  public void setContainers(List<Container> containers) {
-    this.containers = containers;
-  }
-
-  public void addContainer(Container container) {
-    this.containers.add(container);
-  }
-
-  /**
-   * State of the application. Specifying a value for this attribute for the
-   * POST payload raises a validation error. This attribute is available only in
-   * the GET response of a started application.
-   **/
-  public Application state(ApplicationState state) {
-    this.state = state;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "State of the application. Specifying a value for this attribute for the POST payload raises a validation error. This attribute is available only in the GET response of a started application.")
-  @JsonProperty("state")
-  public ApplicationState getState() {
-    return state;
-  }
-
-  public void setState(ApplicationState state) {
-    this.state = state;
-  }
-
-  /**
-   * A blob of key-value pairs of quicklinks to be exported for an application.
-   **/
-  public Application quicklinks(Map<String, String> quicklinks) {
-    this.quicklinks = quicklinks;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A blob of key-value pairs of quicklinks to be exported for an application.")
-  @JsonProperty("quicklinks")
-  public Map<String, String> getQuicklinks() {
-    return quicklinks;
-  }
-
-  public void setQuicklinks(Map<String, String> quicklinks) {
-    this.quicklinks = quicklinks;
-  }
-
-  /**
-   * The YARN queue that this application should be submitted to.
-   **/
-  public Application queue(String queue) {
-    this.queue = queue;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The YARN queue that this application should be submitted to.")
-  @JsonProperty("queue")
-  public String getQueue() {
-    return queue;
-  }
-
-  public void setQueue(String queue) {
-    this.queue = queue;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Application application = (Application) o;
-    return Objects.equals(this.name, application.name);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Application {\n");
-
-    sb.append("    name: ").append(toIndentedString(name)).append("\n");
-    sb.append("    id: ").append(toIndentedString(id)).append("\n");
-    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
-    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
-    sb.append("    launchCommand: ").append(toIndentedString(launchCommand))
-        .append("\n");
-    sb.append("    launchTime: ").append(toIndentedString(launchTime))
-        .append("\n");
-    sb.append("    numberOfContainers: ")
-        .append(toIndentedString(numberOfContainers)).append("\n");
-    sb.append("    numberOfRunningContainers: ")
-        .append(toIndentedString(numberOfRunningContainers)).append("\n");
-    sb.append("    lifetime: ").append(toIndentedString(lifetime)).append("\n");
-    sb.append("    placementPolicy: ").append(toIndentedString(placementPolicy))
-        .append("\n");
-    sb.append("    components: ").append(toIndentedString(components))
-        .append("\n");
-    sb.append("    configuration: ").append(toIndentedString(configuration))
-        .append("\n");
-    sb.append("    containers: ").append(toIndentedString(containers))
-        .append("\n");
-    sb.append("    state: ").append(toIndentedString(state)).append("\n");
-    sb.append("    quicklinks: ").append(toIndentedString(quicklinks))
-        .append("\n");
-    sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java
deleted file mode 100644
index 7f90a9e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-
-/**
- * The current state of an application.
- **/
-
-@ApiModel(description = "The current state of an application.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-public enum ApplicationState {
-  ACCEPTED, STARTED, READY, STOPPED, FAILED;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationStatus.java
deleted file mode 100644
index ed826d8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationStatus.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Objects;
-
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * The current status of a submitted application, returned as a response to the
- * GET API.
- **/
-
-@ApiModel(description = "The current status of a submitted application, returned as a response to the GET API.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@XmlRootElement
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class ApplicationStatus extends BaseResource {
-  private static final long serialVersionUID = -3469885905347851034L;
-
-  private String diagnostics = null;
-  private ApplicationState state = null;
-  private Integer code = null;
-
-  /**
-   * Diagnostic information (if any) for the reason of the current state of the
-   * application. It typically has a non-null value, if the application is in a
-   * non-running state.
-   **/
-  public ApplicationStatus diagnostics(String diagnostics) {
-    this.diagnostics = diagnostics;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Diagnostic information (if any) for the reason of the current state of the application. It typically has a non-null value, if the application is in a non-running state.")
-  @JsonProperty("diagnostics")
-  public String getDiagnostics() {
-    return diagnostics;
-  }
-
-  public void setDiagnostics(String diagnostics) {
-    this.diagnostics = diagnostics;
-  }
-
-  /**
-   * Application state.
-   **/
-  public ApplicationStatus state(ApplicationState state) {
-    this.state = state;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Application state.")
-  @JsonProperty("state")
-  public ApplicationState getState() {
-    return state;
-  }
-
-  public void setState(ApplicationState state) {
-    this.state = state;
-  }
-
-  /**
-   * An error code specific to a scenario which app owners should be able to use
-   * to understand the failure in addition to the diagnostic information.
-   **/
-  public ApplicationStatus code(Integer code) {
-    this.code = code;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "An error code specific to a scenario which app owners should be able to use to understand the failure in addition to the diagnostic information.")
-  @JsonProperty("code")
-  public Integer getCode() {
-    return code;
-  }
-
-  public void setCode(Integer code) {
-    this.code = code;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ApplicationStatus applicationStatus = (ApplicationStatus) o;
-    return Objects.equals(this.diagnostics, applicationStatus.diagnostics)
-        && Objects.equals(this.state, applicationStatus.state)
-        && Objects.equals(this.code, applicationStatus.code);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(diagnostics, state, code);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class ApplicationStatus {\n");
-
-    sb.append("    diagnostics: ").append(toIndentedString(diagnostics))
-        .append("\n");
-    sb.append("    state: ").append(toIndentedString(state)).append("\n");
-    sb.append("    code: ").append(toIndentedString(code)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
deleted file mode 100644
index 87fcf89..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonValue;
-
-/**
- * Artifact of an application component.
- **/
-
-@ApiModel(description = "Artifact of an application component")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class Artifact implements Serializable {
-  private static final long serialVersionUID = 3608929500111099035L;
-
-  private String id = null;
-
-  public enum TypeEnum {
-    DOCKER("DOCKER"), TARBALL("TARBALL"), APPLICATION("APPLICATION");
-
-    private String value;
-
-    TypeEnum(String value) {
-      this.value = value;
-    }
-
-    @Override
-    @JsonValue
-    public String toString() {
-      return value;
-    }
-  }
-
-  private TypeEnum type = TypeEnum.DOCKER;
-  private String uri = null;
-
-  /**
-   * Artifact id. Examples are package location uri for tarball based apps,
-   * image name for docker, etc.
-   **/
-  public Artifact id(String id) {
-    this.id = id;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", required = true, value = "Artifact id. Examples are package location uri for tarball based apps, image name for docker, etc.")
-  @JsonProperty("id")
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-
-  /**
-   * Artifact type, like docker, tarball, etc. (optional).
-   **/
-  public Artifact type(TypeEnum type) {
-    this.type = type;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact type, like docker, tarball, etc. (optional).")
-  @JsonProperty("type")
-  public TypeEnum getType() {
-    return type;
-  }
-
-  public void setType(TypeEnum type) {
-    this.type = type;
-  }
-
-  /**
-   * Artifact location to support multiple artifact stores (optional).
-   **/
-  public Artifact uri(String uri) {
-    this.uri = uri;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact location to support multiple artifact stores (optional).")
-  @JsonProperty("uri")
-  public String getUri() {
-    return uri;
-  }
-
-  public void setUri(String uri) {
-    this.uri = uri;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Artifact artifact = (Artifact) o;
-    return Objects.equals(this.id, artifact.id)
-        && Objects.equals(this.type, artifact.type)
-        && Objects.equals(this.uri, artifact.uri);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(id, type, uri);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Artifact {\n");
-
-    sb.append("    id: ").append(toIndentedString(id)).append("\n");
-    sb.append("    type: ").append(toIndentedString(type)).append("\n");
-    sb.append("    uri: ").append(toIndentedString(uri)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/BaseResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/BaseResource.java
deleted file mode 100644
index 3b2c8b1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/BaseResource.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import java.io.Serializable;
-
-public class BaseResource implements Serializable {
-  private static final long serialVersionUID = 1492603053176889431L;
-
-  private String uri;
-
-  /**
-   * Resource location, e.g. \
-   * "/applications/helloworld/containers/container_e3751_1458061340047_0008_01_000002\
-   * "
-   **/
-  public String getUri() {
-    return uri;
-  }
-
-  public void setUri(String uri) {
-    this.uri = uri;
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder builder = new StringBuilder();
-    builder.append("BaseResource [uri=");
-    builder.append(uri);
-    builder.append("]");
-    return builder.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Component.java
deleted file mode 100644
index 75f579a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Component.java
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * One or more components of the application. If the application is HBase say,
- * then the component can be a simple role like master or regionserver. If the
- * application is a complex business webapp then a component can be other
- * applications say Kafka or Storm. Thereby it opens up the support for complex
- * and nested applications.
- **/
-
-@ApiModel(description = "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@XmlRootElement
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class Component implements Serializable {
-  private static final long serialVersionUID = -8430058381509087805L;
-
-  private String name = null;
-  private List<String> dependencies = new ArrayList<String>();
-  private ReadinessCheck readinessCheck = null;
-  private Artifact artifact = null;
-  private String launchCommand = null;
-  private Resource resource = null;
-  private Long numberOfContainers = null;
-  private Boolean uniqueComponentSupport = null;
-  private Boolean runPrivilegedContainer = null;
-  private PlacementPolicy placementPolicy = null;
-  private Configuration configuration = null;
-  private List<String> quicklinks = new ArrayList<String>();
-
-  /**
-   * Name of the application component (mandatory).
-   **/
-  public Component name(String name) {
-    this.name = name;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", required = true, value = "Name of the application component (mandatory).")
-  @JsonProperty("name")
-  public String getName() {
-    return name;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  /**
-   * An array of application components which should be in READY state (as
-   * defined by readiness check), before this component can be started. The
-   * dependencies across all components of an application should be represented
-   * as a DAG.
-   **/
-  public Component dependencies(List<String> dependencies) {
-    this.dependencies = dependencies;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "An array of application components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of an application should be represented as a DAG.")
-  @JsonProperty("dependencies")
-  public List<String> getDependencies() {
-    return dependencies;
-  }
-
-  public void setDependencies(List<String> dependencies) {
-    this.dependencies = dependencies;
-  }
-
-  /**
-   * Readiness check for this app-component.
-   **/
-  public Component readinessCheck(ReadinessCheck readinessCheck) {
-    this.readinessCheck = readinessCheck;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Readiness check for this app-component.")
-  @JsonProperty("readiness_check")
-  public ReadinessCheck getReadinessCheck() {
-    return readinessCheck;
-  }
-
-  @XmlElement(name = "readiness_check")
-  public void setReadinessCheck(ReadinessCheck readinessCheck) {
-    this.readinessCheck = readinessCheck;
-  }
-
-  /**
-   * Artifact of the component (optional). If not specified, the application
-   * level global artifact takes effect.
-   **/
-  public Component artifact(Artifact artifact) {
-    this.artifact = artifact;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact of the component (optional). If not specified, the application level global artifact takes effect.")
-  @JsonProperty("artifact")
-  public Artifact getArtifact() {
-    return artifact;
-  }
-
-  public void setArtifact(Artifact artifact) {
-    this.artifact = artifact;
-  }
-
-  /**
-   * The custom launch command of this component (optional). When specified at
-   * the component level, it overrides the value specified at the global level
-   * (if any).
-   **/
-  public Component launchCommand(String launchCommand) {
-    this.launchCommand = launchCommand;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The custom launch command of this component (optional). When specified at the component level, it overrides the value specified at the global level (if any).")
-  @JsonProperty("launch_command")
-  public String getLaunchCommand() {
-    return launchCommand;
-  }
-
-  @XmlElement(name = "launch_command")
-  public void setLaunchCommand(String launchCommand) {
-    this.launchCommand = launchCommand;
-  }
-
-  /**
-   * Resource of this component (optional). If not specified, the application
-   * level global resource takes effect.
-   **/
-  public Component resource(Resource resource) {
-    this.resource = resource;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Resource of this component (optional). If not specified, the application level global resource takes effect.")
-  @JsonProperty("resource")
-  public Resource getResource() {
-    return resource;
-  }
-
-  public void setResource(Resource resource) {
-    this.resource = resource;
-  }
-
-  /**
-   * Number of containers for this app-component (optional). If not specified,
-   * the application level global number_of_containers takes effect.
-   **/
-  public Component numberOfContainers(Long numberOfContainers) {
-    this.numberOfContainers = numberOfContainers;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Number of containers for this app-component (optional). If not specified, the application level global number_of_containers takes effect.")
-  @JsonProperty("number_of_containers")
-  public Long getNumberOfContainers() {
-    return numberOfContainers;
-  }
-
-  @XmlElement(name = "number_of_containers")
-  public void setNumberOfContainers(Long numberOfContainers) {
-    this.numberOfContainers = numberOfContainers;
-  }
-
-  /**
-   * Certain applications need to define multiple components using the same
-   * artifact and resource profile, differing only in configurations. In such
-   * cases, this field helps app owners to avoid creating multiple component
-   * definitions with repeated information. The number_of_containers field
-   * dictates the initial number of components created. Component names
-   * typically differ with a trailing id, but assumptions should not be made on
-   * that, as the algorithm can change at any time. Configurations section will
-   * be able to use placeholders like ${USER}, ${CLUSTER_NAME} and
-   * ${COMPONENT_NAME} to be replaced at runtime with user the app is submitted
-   * as, application name and application component name respectively. Launch
-   * command can use placeholders like ${APP_COMPONENT_NAME} and ${APP_NAME} to
-   * get its component name and app name respectively at runtime. The best part
-   * of this feature is that when the component is flexed up, entirely new
-   * components (with new trailing ids) are created.
-   **/
-  public Component uniqueComponentSupport(Boolean uniqueComponentSupport) {
-    this.uniqueComponentSupport = uniqueComponentSupport;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Certain applications need to define multiple components using the same artifact and resource profile, differing only in configurations. In such cases, this field helps app owners to avoid creating multiple component definitions with repeated information. The number_of_containers field dictates the initial number of components created. Component names typically differ with a trailing id, but assumptions should not be made on that, as the algorithm can change at any time. Configurations section will be able to use placeholders like ${USER}, ${CLUSTER_NAME} and ${COMPONENT_NAME} to be replaced at runtime with user the app is submitted as, application name and application component name respectively. Launch command can use placeholders like ${APP_COMPONENT_NAME} and ${APP_NAME} to get its component name and app name respectively at runtime. The best part of this feature is that when the component is flexed up, entirely new components (with new trailing ids) are created.")
-  @JsonProperty("unique_component_support")
-  public Boolean getUniqueComponentSupport() {
-    return uniqueComponentSupport;
-  }
-
-  @XmlElement(name = "unique_component_support")
-  public void setUniqueComponentSupport(Boolean uniqueComponentSupport) {
-    this.uniqueComponentSupport = uniqueComponentSupport;
-  }
-
-  /**
-   * Run all containers of this component in privileged mode (YARN-4262).
-   **/
-  public Component runPrivilegedContainer(Boolean runPrivilegedContainer) {
-    this.runPrivilegedContainer = runPrivilegedContainer;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Run all containers of this component in privileged mode (YARN-4262).")
-  @JsonProperty("run_privileged_container")
-  public Boolean getRunPrivilegedContainer() {
-    return runPrivilegedContainer;
-  }
-
-  @XmlElement(name = "run_privileged_container")
-  public void setRunPrivilegedContainer(Boolean runPrivilegedContainer) {
-    this.runPrivilegedContainer = runPrivilegedContainer;
-  }
-
-  /**
-   * Advanced scheduling and placement policies for all containers of this
-   * component (optional). If not specified, the app level placement_policy
-   * takes effect. Refer to the description at the global level for more
-   * details.
-   **/
-  public Component placementPolicy(PlacementPolicy placementPolicy) {
-    this.placementPolicy = placementPolicy;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the app level placement_policy takes effect. Refer to the description at the global level for more details.")
-  @JsonProperty("placement_policy")
-  public PlacementPolicy getPlacementPolicy() {
-    return placementPolicy;
-  }
-
-  @XmlElement(name = "placement_policy")
-  public void setPlacementPolicy(PlacementPolicy placementPolicy) {
-    this.placementPolicy = placementPolicy;
-  }
-
-  /**
-   * Config properties for this app-component.
-   **/
-  public Component configuration(Configuration configuration) {
-    this.configuration = configuration;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Config properties for this app-component.")
-  @JsonProperty("configuration")
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  /**
-   * A list of quicklink keys defined at the application level, and to be
-   * resolved by this component.
-   **/
-  public Component quicklinks(List<String> quicklinks) {
-    this.quicklinks = quicklinks;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A list of quicklink keys defined at the application level, and to be resolved by this component.")
-  @JsonProperty("quicklinks")
-  public List<String> getQuicklinks() {
-    return quicklinks;
-  }
-
-  public void setQuicklinks(List<String> quicklinks) {
-    this.quicklinks = quicklinks;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Component component = (Component) o;
-    return Objects.equals(this.name, component.name)
-        && Objects.equals(this.dependencies, component.dependencies)
-        && Objects.equals(this.readinessCheck, component.readinessCheck)
-        && Objects.equals(this.artifact, component.artifact)
-        && Objects.equals(this.launchCommand, component.launchCommand)
-        && Objects.equals(this.resource, component.resource)
-        && Objects.equals(this.numberOfContainers, component.numberOfContainers)
-        && Objects.equals(this.uniqueComponentSupport,
-            component.uniqueComponentSupport)
-        && Objects.equals(this.runPrivilegedContainer,
-            component.runPrivilegedContainer)
-        && Objects.equals(this.placementPolicy, component.placementPolicy)
-        && Objects.equals(this.configuration, component.configuration)
-        && Objects.equals(this.quicklinks, component.quicklinks);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(name, dependencies, readinessCheck, artifact,
-        launchCommand, resource, numberOfContainers, uniqueComponentSupport,
-        runPrivilegedContainer, placementPolicy, configuration, quicklinks);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Component {\n");
-
-    sb.append("    name: ").append(toIndentedString(name)).append("\n");
-    sb.append("    dependencies: ").append(toIndentedString(dependencies))
-        .append("\n");
-    sb.append("    readinessCheck: ").append(toIndentedString(readinessCheck))
-        .append("\n");
-    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
-    sb.append("    launchCommand: ").append(toIndentedString(launchCommand))
-        .append("\n");
-    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
-    sb.append("    numberOfContainers: ")
-        .append(toIndentedString(numberOfContainers)).append("\n");
-    sb.append("    uniqueComponentSupport: ")
-        .append(toIndentedString(uniqueComponentSupport)).append("\n");
-    sb.append("    runPrivilegedContainer: ")
-        .append(toIndentedString(runPrivilegedContainer)).append("\n");
-    sb.append("    placementPolicy: ").append(toIndentedString(placementPolicy))
-        .append("\n");
-    sb.append("    configuration: ").append(toIndentedString(configuration))
-        .append("\n");
-    sb.append("    quicklinks: ").append(toIndentedString(quicklinks))
-        .append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
deleted file mode 100644
index 01d976f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.Objects;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonValue;
-
-/**
- * A config file that needs to be created and made available as a volume in an
- * application component container.
- **/
-
-@ApiModel(description = "A config file that needs to be created and made available as a volume in an application component container.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@XmlRootElement
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class ConfigFile implements Serializable {
-  private static final long serialVersionUID = -7009402089417704612L;
-
-  public enum TypeEnum {
-    XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
-        "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML");
-
-    private String value;
-
-    TypeEnum(String value) {
-      this.value = value;
-    }
-
-    @Override
-    @JsonValue
-    public String toString() {
-      return value;
-    }
-  }
-
-  private TypeEnum type = null;
-  private String destFile = null;
-  private String srcFile = null;
-  private Object props = null;
-
-  /**
-   * Config file in the standard format like xml, properties, json, yaml,
-   * template.
-   **/
-  public ConfigFile type(TypeEnum type) {
-    this.type = type;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Config file in the standard format like xml, properties, json, yaml, template.")
-  @JsonProperty("type")
-  public TypeEnum getType() {
-    return type;
-  }
-
-  public void setType(TypeEnum type) {
-    this.type = type;
-  }
-
-  /**
-   * The absolute path that this configuration file should be mounted as, in the
-   * application container.
-   **/
-  public ConfigFile destFile(String destFile) {
-    this.destFile = destFile;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The absolute path that this configuration file should be mounted as, in the application container.")
-  @JsonProperty("dest_file")
-  public String getDestFile() {
-    return destFile;
-  }
-
-  @XmlElement(name = "dest_file")
-  public void setDestFile(String destFile) {
-    this.destFile = destFile;
-  }
-
-  /**
-   * Required for type template. This provides the source location of the
-   * template which needs to be mounted as dest_file post property
-   * substitutions. Typically the src_file would point to a source controlled
-   * network accessible file maintained by tools like puppet, chef, etc.
-   **/
-  public ConfigFile srcFile(String srcFile) {
-    this.srcFile = srcFile;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.")
-  @JsonProperty("src_file")
-  public String getSrcFile() {
-    return srcFile;
-  }
-
-  @XmlElement(name = "src_file")
-  public void setSrcFile(String srcFile) {
-    this.srcFile = srcFile;
-  }
-
-  /**
-   * A blob of key value pairs that will be dumped in the dest_file in the
-   * format as specified in type. If the type is template then the attribute
-   * src_file is mandatory and the src_file content is dumped to dest_file post
-   * property substitutions.
-   **/
-  public ConfigFile props(Object props) {
-    this.props = props;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.")
-  @JsonProperty("props")
-  public Object getProps() {
-    return props;
-  }
-
-  public void setProps(Object props) {
-    this.props = props;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ConfigFile configFile = (ConfigFile) o;
-    return Objects.equals(this.type, configFile.type)
-        && Objects.equals(this.destFile, configFile.destFile)
-        && Objects.equals(this.srcFile, configFile.srcFile)
-        && Objects.equals(this.props, configFile.props);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(type, destFile, srcFile, props);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class ConfigFile {\n");
-
-    sb.append("    type: ").append(toIndentedString(type)).append("\n");
-    sb.append("    destFile: ").append(toIndentedString(destFile)).append("\n");
-    sb.append("    srcFile: ").append(toIndentedString(srcFile)).append("\n");
-    sb.append("    props: ").append(toIndentedString(props)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Configuration.java
deleted file mode 100644
index 908220a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Configuration.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * Set of configuration properties that can be injected into the application
- * components via envs, files and custom pluggable helper docker containers.
- * Files of several standard formats like xml, properties, json, yaml and
- * templates will be supported.
- **/
-
-@ApiModel(description = "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class Configuration implements Serializable {
-  private static final long serialVersionUID = -4330788704981074466L;
-
-  private Map<String, String> properties = new HashMap<String, String>();
-  private Map<String, String> env = new HashMap<String, String>();
-  private List<ConfigFile> files = new ArrayList<ConfigFile>();
-
-  /**
-   * A blob of key-value pairs of common application properties.
-   **/
-  public Configuration properties(Map<String, String> properties) {
-    this.properties = properties;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A blob of key-value pairs of common application properties.")
-  @JsonProperty("properties")
-  public Map<String, String> getProperties() {
-    return properties;
-  }
-
-  public void setProperties(Map<String, String> properties) {
-    this.properties = properties;
-  }
-
-  /**
-   * A blob of key-value pairs which will be appended to the default system
-   * properties and handed off to the application at start time. All placeholder
-   * references to properties will be substituted before injection.
-   **/
-  public Configuration env(Map<String, String> env) {
-    this.env = env;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "A blob of key-value pairs which will be appended to the default system properties and handed off to the application at start time. All placeholder references to properties will be substituted before injection.")
-  @JsonProperty("env")
-  public Map<String, String> getEnv() {
-    return env;
-  }
-
-  public void setEnv(Map<String, String> env) {
-    this.env = env;
-  }
-
-  /**
-   * Array of list of files that needs to be created and made available as
-   * volumes in the application component containers.
-   **/
-  public Configuration files(List<ConfigFile> files) {
-    this.files = files;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Array of list of files that needs to be created and made available as volumes in the application component containers.")
-  @JsonProperty("files")
-  public List<ConfigFile> getFiles() {
-    return files;
-  }
-
-  public void setFiles(List<ConfigFile> files) {
-    this.files = files;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Configuration configuration = (Configuration) o;
-    return Objects.equals(this.properties, configuration.properties)
-        && Objects.equals(this.env, configuration.env)
-        && Objects.equals(this.files, configuration.files);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(properties, env, files);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Configuration {\n");
-
-    sb.append("    properties: ").append(toIndentedString(properties))
-        .append("\n");
-    sb.append("    env: ").append(toIndentedString(env)).append("\n");
-    sb.append("    files: ").append(toIndentedString(files)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
deleted file mode 100644
index 24aada7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Date;
-import java.util.Objects;
-
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * An instance of a running application container.
- **/
-
-@ApiModel(description = "An instance of a running application container")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-@XmlRootElement
-@JsonInclude(JsonInclude.Include.NON_NULL)
-public class Container extends BaseResource {
-  private static final long serialVersionUID = -8955788064529288L;
-
-  private String id = null;
-  private Date launchTime = null;
-  private String ip = null;
-  private String hostname = null;
-  private String bareHost = null;
-  private ContainerState state = null;
-  private String componentName = null;
-  private Resource resource = null;
-  private Artifact artifact = null;
-  private Boolean privilegedContainer = null;
-
-  /**
-   * Unique container id of a running application, e.g.
-   * container_e3751_1458061340047_0008_01_000002.
-   **/
-  public Container id(String id) {
-    this.id = id;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Unique container id of a running application, e.g. container_e3751_1458061340047_0008_01_000002.")
-  @JsonProperty("id")
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-
-  /**
-   * The time when the container was created, e.g. 2016-03-16T01:01:49.000Z.
-   * This will most likely be different from cluster launch time.
-   **/
-  public Container launchTime(Date launchTime) {
-    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.")
-  @JsonProperty("launch_time")
-  public Date getLaunchTime() {
-    return launchTime == null ? null : (Date) launchTime.clone();
-  }
-
-  @XmlElement(name = "launch_time")
-  public void setLaunchTime(Date launchTime) {
-    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
-  }
-
-  /**
-   * IP address of a running container, e.g. 172.31.42.141. The IP address and
-   * hostname attribute values are dependent on the cluster/docker network setup
-   * as per YARN-4007.
-   **/
-  public Container ip(String ip) {
-    this.ip = ip;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
-  @JsonProperty("ip")
-  public String getIp() {
-    return ip;
-  }
-
-  public void setIp(String ip) {
-    this.ip = ip;
-  }
-
-  /**
-   * Fully qualified hostname of a running container, e.g.
-   * ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and
-   * hostname attribute values are dependent on the cluster/docker network setup
-   * as per YARN-4007.
-   **/
-  public Container hostname(String hostname) {
-    this.hostname = hostname;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
-  @JsonProperty("hostname")
-  public String getHostname() {
-    return hostname;
-  }
-
-  public void setHostname(String hostname) {
-    this.hostname = hostname;
-  }
-
-  /**
-   * The bare node or host in which the container is running, e.g.
-   * cn008.example.com.
-   **/
-  public Container bareHost(String bareHost) {
-    this.bareHost = bareHost;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "The bare node or host in which the container is running, e.g. cn008.example.com.")
-  @JsonProperty("bare_host")
-  public String getBareHost() {
-    return bareHost;
-  }
-
-  @XmlElement(name = "bare_host")
-  public void setBareHost(String bareHost) {
-    this.bareHost = bareHost;
-  }
-
-  /**
-   * State of the container of an application.
-   **/
-  public Container state(ContainerState state) {
-    this.state = state;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "State of the container of an application.")
-  @JsonProperty("state")
-  public ContainerState getState() {
-    return state;
-  }
-
-  public void setState(ContainerState state) {
-    this.state = state;
-  }
-
-  /**
-   * Name of the component that this container instance belongs to.
-   **/
-  public Container componentName(String componentName) {
-    this.componentName = componentName;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.")
-  @JsonProperty("component_name")
-  public String getComponentName() {
-    return componentName;
-  }
-
-  @XmlElement(name = "component_name")
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  /**
-   * Resource used for this container.
-   **/
-  public Container resource(Resource resource) {
-    this.resource = resource;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Resource used for this container.")
-  @JsonProperty("resource")
-  public Resource getResource() {
-    return resource;
-  }
-
-  public void setResource(Resource resource) {
-    this.resource = resource;
-  }
-
-  /**
-   * Artifact used for this container.
-   **/
-  public Container artifact(Artifact artifact) {
-    this.artifact = artifact;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact used for this container.")
-  @JsonProperty("artifact")
-  public Artifact getArtifact() {
-    return artifact;
-  }
-
-  public void setArtifact(Artifact artifact) {
-    this.artifact = artifact;
-  }
-
-  /**
-   * Container running in privileged mode or not.
-   **/
-  public Container privilegedContainer(Boolean privilegedContainer) {
-    this.privilegedContainer = privilegedContainer;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Container running in privileged mode or not.")
-  @JsonProperty("privileged_container")
-  public Boolean getPrivilegedContainer() {
-    return privilegedContainer;
-  }
-
-  public void setPrivilegedContainer(Boolean privilegedContainer) {
-    this.privilegedContainer = privilegedContainer;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Container container = (Container) o;
-    return Objects.equals(this.id, container.id);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(id);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Container {\n");
-
-    sb.append("    id: ").append(toIndentedString(id)).append("\n");
-    sb.append("    launchTime: ").append(toIndentedString(launchTime))
-        .append("\n");
-    sb.append("    ip: ").append(toIndentedString(ip)).append("\n");
-    sb.append("    hostname: ").append(toIndentedString(hostname)).append("\n");
-    sb.append("    bareHost: ").append(toIndentedString(bareHost)).append("\n");
-    sb.append("    state: ").append(toIndentedString(state)).append("\n");
-    sb.append("    componentName: ").append(toIndentedString(componentName))
-        .append("\n");
-    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
-    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
-    sb.append("    privilegedContainer: ")
-        .append(toIndentedString(privilegedContainer)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ContainerState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ContainerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ContainerState.java
deleted file mode 100644
index cb017fb..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ContainerState.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-/**
- * The current state of the container of an application.
- **/
-public enum ContainerState {
-  INIT, READY;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Error.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Error.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Error.java
deleted file mode 100644
index 91c4e3a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Error.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-public class Error {
-
-  private Integer code = null;
-  private String message = null;
-  private String fields = null;
-
-  /**
-   **/
-  public Error code(Integer code) {
-    this.code = code;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "")
-  @JsonProperty("code")
-  public Integer getCode() {
-    return code;
-  }
-
-  public void setCode(Integer code) {
-    this.code = code;
-  }
-
-  /**
-   **/
-  public Error message(String message) {
-    this.message = message;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "")
-  @JsonProperty("message")
-  public String getMessage() {
-    return message;
-  }
-
-  public void setMessage(String message) {
-    this.message = message;
-  }
-
-  /**
-   **/
-  public Error fields(String fields) {
-    this.fields = fields;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "")
-  @JsonProperty("fields")
-  public String getFields() {
-    return fields;
-  }
-
-  public void setFields(String fields) {
-    this.fields = fields;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Error error = (Error) o;
-    return Objects.equals(this.code, error.code)
-        && Objects.equals(this.message, error.message)
-        && Objects.equals(this.fields, error.fields);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(code, message, fields);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Error {\n");
-
-    sb.append("    code: ").append(toIndentedString(code)).append("\n");
-    sb.append("    message: ").append(toIndentedString(message)).append("\n");
-    sb.append("    fields: ").append(toIndentedString(fields)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
deleted file mode 100644
index 5df00a0..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * Placement policy of an instance of an application. This feature is in the
- * works in YARN-4902.
- **/
-
-@ApiModel(description = "Placement policy of an instance of an application. This feature is in the works in YARN-4902.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-public class PlacementPolicy implements Serializable {
-  private static final long serialVersionUID = 4341110649551172231L;
-
-  private String label = null;
-
-  /**
-   * Assigns an app to a named partition of the cluster where the application
-   * desires to run (optional). If not specified all apps are submitted to a
-   * default label of the app owner. One or more labels can be setup for each
-   * application owner account with required constraints like no-preemption,
-   * sla-99999, preemption-ok, etc.
-   **/
-  public PlacementPolicy label(String label) {
-    this.label = label;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Assigns an app to a named partition of the cluster where the application desires to run (optional). If not specified all apps are submitted to a default label of the app owner. One or more labels can be setup for each application owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.")
-  @JsonProperty("label")
-  public String getLabel() {
-    return label;
-  }
-
-  public void setLabel(String label) {
-    this.label = label;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    PlacementPolicy placementPolicy = (PlacementPolicy) o;
-    return Objects.equals(this.label, placementPolicy.label);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(label);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class PlacementPolicy {\n");
-
-    sb.append("    label: ").append(toIndentedString(label)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java
deleted file mode 100644
index 26cd39a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonValue;
-
-/**
- * A custom command or a pluggable helper container to determine the readiness
- * of a container of a component. Readiness for every application is different.
- * Hence the need for a simple interface, with scope to support advanced
- * usecases.
- **/
-
-@ApiModel(description = "A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every application is different. Hence the need for a simple interface, with scope to support advanced usecases.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-public class ReadinessCheck implements Serializable {
-  private static final long serialVersionUID = -3836839816887186801L;
-
-  public enum TypeEnum {
-    HTTP("HTTP");
-
-    private String value;
-
-    TypeEnum(String value) {
-      this.value = value;
-    }
-
-    @Override
-    @JsonValue
-    public String toString() {
-      return value;
-    }
-  }
-
-  private TypeEnum type = null;
-  private String uri = null;
-  private Artifact artifact = null;
-
-  /**
-   * E.g. HTTP (YARN will perform a simple REST call at a regular interval and
-   * expect a 204 No content).
-   **/
-  public ReadinessCheck type(TypeEnum type) {
-    this.type = type;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).")
-  @JsonProperty("type")
-  public TypeEnum getType() {
-    return type;
-  }
-
-  public void setType(TypeEnum type) {
-    this.type = type;
-  }
-
-  /**
-   * Fully qualified REST uri endpoint.
-   **/
-  public ReadinessCheck uri(String uri) {
-    this.uri = uri;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", required = true, value = "Fully qualified REST uri endpoint.")
-  @JsonProperty("uri")
-  public String getUri() {
-    return uri;
-  }
-
-  public void setUri(String uri) {
-    this.uri = uri;
-  }
-
-  /**
-   * Artifact of the pluggable readiness check helper container (optional). If
-   * specified, this helper container typically hosts the http uri and
-   * encapsulates the complex scripts required to perform actual container
-   * readiness check. At the end it is expected to respond a 204 No content just
-   * like the simplified use case. This pluggable framework benefits application
-   * owners who can run applications without any packaging modifications. Note,
-   * artifacts of type docker only is supported for now.
-   **/
-  public ReadinessCheck artifact(Artifact artifact) {
-    this.artifact = artifact;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits application owners who can run applications without any packaging modifications. Note, artifacts of type docker only is supported for now.")
-  @JsonProperty("artifact")
-  public Artifact getArtifact() {
-    return artifact;
-  }
-
-  public void setArtifact(Artifact artifact) {
-    this.artifact = artifact;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    ReadinessCheck readinessCheck = (ReadinessCheck) o;
-    return Objects.equals(this.type, readinessCheck.type)
-        && Objects.equals(this.uri, readinessCheck.uri)
-        && Objects.equals(this.artifact, readinessCheck.artifact);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(type, uri, artifact);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class ReadinessCheck {\n");
-
-    sb.append("    type: ").append(toIndentedString(type)).append("\n");
-    sb.append("    uri: ").append(toIndentedString(uri)).append("\n");
-    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
deleted file mode 100644
index 234ccb3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * Resource determines the amount of resources (vcores, memory, network, etc.)
- * usable by a container. This field determines the resource to be applied for
- * all the containers of a component or application. The resource specified at
- * the app (or global) level can be overriden at the component level. Only one
- * of profile OR cpu &amp; memory are exepected. It raises a validation
- * exception otherwise.
- **/
-
-@ApiModel(description = "Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or application. The resource specified at the app (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are exepected. It raises a validation exception otherwise.")
-@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
-public class Resource extends BaseResource implements Cloneable {
-  private static final long serialVersionUID = -6431667797380250037L;
-
-  private String profile = null;
-  private Integer cpus = null;
-  private String memory = null;
-
-  /**
-   * Each resource profile has a unique id which is associated with a
-   * cluster-level predefined memory, cpus, etc.
-   **/
-  public Resource profile(String profile) {
-    this.profile = profile;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.")
-  @JsonProperty("profile")
-  public String getProfile() {
-    return profile;
-  }
-
-  public void setProfile(String profile) {
-    this.profile = profile;
-  }
-
-  /**
-   * Amount of vcores allocated to each container (optional but overrides cpus
-   * in profile if specified).
-   **/
-  public Resource cpus(Integer cpus) {
-    this.cpus = cpus;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).")
-  @JsonProperty("cpus")
-  public Integer getCpus() {
-    return cpus;
-  }
-
-  public void setCpus(Integer cpus) {
-    this.cpus = cpus;
-  }
-
-  /**
-   * Amount of memory allocated to each container (optional but overrides memory
-   * in profile if specified). Currently accepts only an integer value and
-   * default unit is in MB.
-   **/
-  public Resource memory(String memory) {
-    this.memory = memory;
-    return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.")
-  @JsonProperty("memory")
-  public String getMemory() {
-    return memory;
-  }
-
-  public void setMemory(String memory) {
-    this.memory = memory;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    Resource resource = (Resource) o;
-    return Objects.equals(this.profile, resource.profile)
-        && Objects.equals(this.cpus, resource.cpus)
-        && Objects.equals(this.memory, resource.memory);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(profile, cpus, memory);
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("class Resource {\n");
-
-    sb.append("    profile: ").append(toIndentedString(profile)).append("\n");
-    sb.append("    cpus: ").append(toIndentedString(cpus)).append("\n");
-    sb.append("    memory: ").append(toIndentedString(memory)).append("\n");
-    sb.append("}");
-    return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-    if (o == null) {
-      return "null";
-    }
-    return o.toString().replace("\n", "\n    ");
-  }
-
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    return super.clone();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
index f2f8b5b..31e3051 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml
@@ -21,7 +21,7 @@
         <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
         <init-param>
             <param-name>com.sun.jersey.config.property.packages</param-name>
-            <param-value>org.apache.hadoop.yarn.services.webapp,org.apache.hadoop.yarn.services.api,org.apache.hadoop.yarn.services.resource,org.apache.hadoop.yarn.services.api.impl</param-value>
+            <param-value>org.apache.hadoop.yarn.services.webapp,org.apache.hadoop.yarn.services.api,org.apache.slider.api.resource,org.apache.hadoop.yarn.services.api.impl</param-value>
         </init-param>
         <init-param>
           <param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
index fdf1419..7bfb410 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
@@ -23,9 +23,9 @@ import static org.apache.hadoop.yarn.services.utils.RestApiErrorMessages.*;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.hadoop.yarn.services.resource.Application;
-import org.apache.hadoop.yarn.services.resource.Artifact;
-import org.apache.hadoop.yarn.services.resource.Resource;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Resource;
 import org.apache.slider.common.SliderKeys;
 import org.junit.After;
 import org.junit.Assert;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 084a10b..ee77ecb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -349,6 +349,11 @@
       <scope>compile</scope>
     </dependency>
 
+    <dependency>
+        <groupId>io.swagger</groupId>
+        <artifactId>swagger-annotations</artifactId>
+        <version>1.5.4</version>
+    </dependency>
   </dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
new file mode 100644
index 0000000..cc3355a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * An Application resource has the following attributes.
+ **/
+
+@ApiModel(description = "An Application resource has the following attributes.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ "name", "state", "resource", "number_of_containers",
+    "lifetime", "containers" })
+public class Application extends BaseResource {
+  private static final long serialVersionUID = -4491694636566094885L;
+
+  private String name = null;
+  private String id = null;
+  private Artifact artifact = null;
+  private Resource resource = null;
+  private String launchCommand = null;
+  private Date launchTime = null;
+  private Long numberOfContainers = null;
+  private Long numberOfRunningContainers = null;
+  private Long lifetime = null;
+  private PlacementPolicy placementPolicy = null;
+  private List<Component> components = null;
+  private Configuration configuration = null;
+  // Eagerly initialized so addContainer() below never NPEs on a new object.
+  private List<Container> containers = new ArrayList<>();
+  private ApplicationState state = null;
+  private Map<String, String> quicklinks = null;
+  private String queue = null;
+
+  /**
+   * A unique application name.
+   **/
+  public Application name(String name) {
+    this.name = name;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "A unique application name.")
+  @JsonProperty("name")
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  /**
+   * A unique application id.
+   **/
+  public Application id(String id) {
+    this.id = id;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A unique application id.")
+  @JsonProperty("id")
+  public String getId() {
+    return id;
+  }
+
+  public void setId(String id) {
+    this.id = id;
+  }
+
+  /**
+   * Artifact of single-component applications. Mandatory if components
+   * attribute is not specified.
+   **/
+  public Application artifact(Artifact artifact) {
+    this.artifact = artifact;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact of single-component applications. Mandatory if components attribute is not specified.")
+  @JsonProperty("artifact")
+  public Artifact getArtifact() {
+    return artifact;
+  }
+
+  public void setArtifact(Artifact artifact) {
+    this.artifact = artifact;
+  }
+
+  /**
+   * Resource of single-component applications or the global default for
+   * multi-component applications. Mandatory if it is a single-component
+   * application and if cpus and memory are not specified at the Application
+   * level.
+   **/
+  public Application resource(Resource resource) {
+    this.resource = resource;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Resource of single-component applications or the global default for multi-component applications. Mandatory if it is a single-component application and if cpus and memory are not specified at the Application level.")
+  @JsonProperty("resource")
+  public Resource getResource() {
+    return resource;
+  }
+
+  public void setResource(Resource resource) {
+    this.resource = resource;
+  }
+
+  /**
+   * The custom launch command of an application component (optional). If not
+   * specified for applications with docker images say, it will default to the
+   * default start command of the image. If there is a single component in this
+   * application, you can specify this without the need to have a 'components'
+   * section.
+   **/
+  public Application launchCommand(String launchCommand) {
+    this.launchCommand = launchCommand;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The custom launch command of an application component (optional). If not specified for applications with docker images say, it will default to the default start command of the image. If there is a single component in this application, you can specify this without the need to have a 'components' section.")
+  @JsonProperty("launch_command")
+  public String getLaunchCommand() {
+    return launchCommand;
+  }
+
+  // JSON uses snake_case via @JsonProperty on the getter; @XmlElement on the
+  // setter keeps the JAXB/XML element name in sync with it.
+  @XmlElement(name = "launch_command")
+  public void setLaunchCommand(String launchCommand) {
+    this.launchCommand = launchCommand;
+  }
+
+  /**
+   * The time when the application was created, e.g. 2016-03-16T01:01:49.000Z.
+   **/
+  public Application launchTime(Date launchTime) {
+    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The time when the application was created, e.g. 2016-03-16T01:01:49.000Z.")
+  @JsonProperty("launch_time")
+  public Date getLaunchTime() {
+    // Defensive copy: java.util.Date is mutable, so the field is cloned on
+    // the way in (setter/fluent) and on the way out (getter).
+    return launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  @XmlElement(name = "launch_time")
+  public void setLaunchTime(Date launchTime) {
+    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  /**
+   * Number of containers for each app-component in the application. Each
+   * app-component can further override this app-level global default.
+   **/
+  public Application numberOfContainers(Long numberOfContainers) {
+    this.numberOfContainers = numberOfContainers;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Number of containers for each app-component in the application. Each app-component can further override this app-level global default.")
+  @JsonProperty("number_of_containers")
+  public Long getNumberOfContainers() {
+    return numberOfContainers;
+  }
+
+  @XmlElement(name = "number_of_containers")
+  public void setNumberOfContainers(Long numberOfContainers) {
+    this.numberOfContainers = numberOfContainers;
+  }
+
+  /**
+   * In get response this provides the total number of running containers for
+   * this application (across all components) at the time of request. Note, a
+   * subsequent request can return a different number as and when more
+   * containers get allocated until it reaches the total number of containers or
+   * if a flex request has been made between the two requests.
+   **/
+  public Application numberOfRunningContainers(Long numberOfRunningContainers) {
+    this.numberOfRunningContainers = numberOfRunningContainers;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "In get response this provides the total number of running containers for this application (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.")
+  @JsonProperty("number_of_running_containers")
+  public Long getNumberOfRunningContainers() {
+    return numberOfRunningContainers;
+  }
+
+  @XmlElement(name = "number_of_running_containers")
+  public void setNumberOfRunningContainers(Long numberOfRunningContainers) {
+    this.numberOfRunningContainers = numberOfRunningContainers;
+  }
+
+  /**
+   * Life time (in seconds) of the application from the time it reaches the
+   * STARTED state (after which it is automatically destroyed by YARN). For
+   * unlimited lifetime do not set a lifetime value.
+   **/
+  public Application lifetime(Long lifetime) {
+    this.lifetime = lifetime;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.")
+  @JsonProperty("lifetime")
+  public Long getLifetime() {
+    return lifetime;
+  }
+
+  public void setLifetime(Long lifetime) {
+    this.lifetime = lifetime;
+  }
+
+  /**
+   * Advanced scheduling and placement policies (optional). If not specified, it
+   * defaults to the default placement policy of the app owner. The design of
+   * placement policies are in the works. It is not very clear at this point,
+   * how policies in conjunction with labels be exposed to application owners.
+   * This is a placeholder for now. The advanced structure of this attribute
+   * will be determined by YARN-4902.
+   **/
+  public Application placementPolicy(PlacementPolicy placementPolicy) {
+    this.placementPolicy = placementPolicy;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies (optional). If not specified, it defaults to the default placement policy of the app owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to application owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.")
+  @JsonProperty("placement_policy")
+  public PlacementPolicy getPlacementPolicy() {
+    return placementPolicy;
+  }
+
+  @XmlElement(name = "placement_policy")
+  public void setPlacementPolicy(PlacementPolicy placementPolicy) {
+    this.placementPolicy = placementPolicy;
+  }
+
+  /**
+   * Components of an application.
+   **/
+  public Application components(List<Component> components) {
+    this.components = components;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Components of an application.")
+  @JsonProperty("components")
+  public List<Component> getComponents() {
+    return components;
+  }
+
+  public void setComponents(List<Component> components) {
+    this.components = components;
+  }
+
+  /**
+   * Config properties of an application. Configurations provided at the
+   * application/global level are available to all the components. Specific
+   * properties can be overridden at the component level.
+   **/
+  public Application configuration(Configuration configuration) {
+    this.configuration = configuration;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Config properties of an application. Configurations provided at the application/global level are available to all the components. Specific properties can be overridden at the component level.")
+  @JsonProperty("configuration")
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+
+  public void setConfiguration(Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  /**
+   * Containers of a started application. Specifying a value for this attribute
+   * for the POST payload raises a validation error. This blob is available only
+   * in the GET response of a started application.
+   **/
+  public Application containers(List<Container> containers) {
+    this.containers = containers;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Containers of a started application. Specifying a value for this attribute for the POST payload raises a validation error. This blob is available only in the GET response of a started application.")
+  @JsonProperty("containers")
+  public List<Container> getContainers() {
+    // Returns the internal list itself (no copy); callers can mutate it.
+    return containers;
+  }
+
+  public void setContainers(List<Container> containers) {
+    this.containers = containers;
+  }
+
+  // Appends directly to the live internal list (see field initializer).
+  public void addContainer(Container container) {
+    this.containers.add(container);
+  }
+
+  /**
+   * State of the application. Specifying a value for this attribute for the
+   * POST payload raises a validation error. This attribute is available only in
+   * the GET response of a started application.
+   **/
+  public Application state(ApplicationState state) {
+    this.state = state;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "State of the application. Specifying a value for this attribute for the POST payload raises a validation error. This attribute is available only in the GET response of a started application.")
+  @JsonProperty("state")
+  public ApplicationState getState() {
+    return state;
+  }
+
+  public void setState(ApplicationState state) {
+    this.state = state;
+  }
+
+  /**
+   * A blob of key-value pairs of quicklinks to be exported for an application.
+   **/
+  public Application quicklinks(Map<String, String> quicklinks) {
+    this.quicklinks = quicklinks;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A blob of key-value pairs of quicklinks to be exported for an application.")
+  @JsonProperty("quicklinks")
+  public Map<String, String> getQuicklinks() {
+    return quicklinks;
+  }
+
+  public void setQuicklinks(Map<String, String> quicklinks) {
+    this.quicklinks = quicklinks;
+  }
+
+  /**
+   * The YARN queue that this application should be submitted to.
+   **/
+  public Application queue(String queue) {
+    this.queue = queue;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The YARN queue that this application should be submitted to.")
+  @JsonProperty("queue")
+  public String getQueue() {
+    return queue;
+  }
+
+  public void setQueue(String queue) {
+    this.queue = queue;
+  }
+
+  // NOTE: identity is the unique application name ONLY -- every other
+  // attribute is deliberately ignored here and in hashCode() below, so two
+  // Application objects with the same name are equal even if they differ.
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Application application = (Application) o;
+    return Objects.equals(this.name, application.name);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(name);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Application {\n");
+
+    sb.append("    name: ").append(toIndentedString(name)).append("\n");
+    sb.append("    id: ").append(toIndentedString(id)).append("\n");
+    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
+    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
+    sb.append("    launchCommand: ").append(toIndentedString(launchCommand))
+        .append("\n");
+    sb.append("    launchTime: ").append(toIndentedString(launchTime))
+        .append("\n");
+    sb.append("    numberOfContainers: ")
+        .append(toIndentedString(numberOfContainers)).append("\n");
+    sb.append("    numberOfRunningContainers: ")
+        .append(toIndentedString(numberOfRunningContainers)).append("\n");
+    sb.append("    lifetime: ").append(toIndentedString(lifetime)).append("\n");
+    sb.append("    placementPolicy: ").append(toIndentedString(placementPolicy))
+        .append("\n");
+    sb.append("    components: ").append(toIndentedString(components))
+        .append("\n");
+    sb.append("    configuration: ").append(toIndentedString(configuration))
+        .append("\n");
+    sb.append("    containers: ").append(toIndentedString(containers))
+        .append("\n");
+    sb.append("    state: ").append(toIndentedString(state)).append("\n");
+    sb.append("    quicklinks: ").append(toIndentedString(quicklinks))
+        .append("\n");
+    sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationState.java
new file mode 100644
index 0000000..6827c16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationState.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+
+/**
+ * The current state of an application.
+ **/
+
+@ApiModel(description = "The current state of an application.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+public enum ApplicationState {
+  // Values reported in the 'state' field of the REST payloads. STOPPED and
+  // FAILED are presumably terminal -- confirm against the AM state machine.
+  ACCEPTED, STARTED, READY, STOPPED, FAILED;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationStatus.java
new file mode 100644
index 0000000..06960a8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ApplicationStatus.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlRootElement;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * The current status of a submitted application, returned as a response to the
+ * GET API.
+ **/
+
+@ApiModel(description = "The current status of a submitted application, returned as a response to the GET API.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class ApplicationStatus extends BaseResource {
+  private static final long serialVersionUID = -3469885905347851034L;
+
+  private String diagnostics = null;
+  private ApplicationState state = null;
+  private Integer code = null;
+
+  /**
+   * Diagnostic information (if any) for the reason of the current state of the
+   * application. It typically has a non-null value, if the application is in a
+   * non-running state.
+   **/
+  public ApplicationStatus diagnostics(String diagnostics) {
+    this.diagnostics = diagnostics;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Diagnostic information (if any) for the reason of the current state of the application. It typically has a non-null value, if the application is in a non-running state.")
+  @JsonProperty("diagnostics")
+  public String getDiagnostics() {
+    return diagnostics;
+  }
+
+  public void setDiagnostics(String diagnostics) {
+    this.diagnostics = diagnostics;
+  }
+
+  /**
+   * Application state.
+   **/
+  public ApplicationStatus state(ApplicationState state) {
+    this.state = state;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Application state.")
+  @JsonProperty("state")
+  public ApplicationState getState() {
+    return state;
+  }
+
+  public void setState(ApplicationState state) {
+    this.state = state;
+  }
+
+  /**
+   * An error code specific to a scenario which app owners should be able to use
+   * to understand the failure in addition to the diagnostic information.
+   **/
+  public ApplicationStatus code(Integer code) {
+    this.code = code;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "An error code specific to a scenario which app owners should be able to use to understand the failure in addition to the diagnostic information.")
+  @JsonProperty("code")
+  public Integer getCode() {
+    return code;
+  }
+
+  public void setCode(Integer code) {
+    this.code = code;
+  }
+
+  // Unlike Application (name-only identity), status equality covers all
+  // three fields: diagnostics, state and code.
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ApplicationStatus applicationStatus = (ApplicationStatus) o;
+    return Objects.equals(this.diagnostics, applicationStatus.diagnostics)
+        && Objects.equals(this.state, applicationStatus.state)
+        && Objects.equals(this.code, applicationStatus.code);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(diagnostics, state, code);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class ApplicationStatus {\n");
+
+    sb.append("    diagnostics: ").append(toIndentedString(diagnostics))
+        .append("\n");
+    sb.append("    state: ").append(toIndentedString(state)).append("\n");
+    sb.append("    code: ").append(toIndentedString(code)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Artifact.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Artifact.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Artifact.java
new file mode 100644
index 0000000..f274d7d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Artifact.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+/**
+ * Artifact of an application component.
+ **/
+
+@ApiModel(description = "Artifact of an application component")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Artifact implements Serializable {
+  private static final long serialVersionUID = 3608929500111099035L;
+
+  private String id = null;
+
+  public enum TypeEnum {
+    DOCKER("DOCKER"), TARBALL("TARBALL"), APPLICATION("APPLICATION");
+
+    private String value;
+
+    TypeEnum(String value) {
+      this.value = value;
+    }
+
+    // @JsonValue makes Jackson serialize the enum as this plain string
+    // (e.g. "DOCKER") instead of an object.
+    @Override
+    @JsonValue
+    public String toString() {
+      return value;
+    }
+  }
+
+  // Field initializer means the type defaults to DOCKER when unspecified.
+  private TypeEnum type = TypeEnum.DOCKER;
+  private String uri = null;
+
+  /**
+   * Artifact id. Examples are package location uri for tarball based apps,
+   * image name for docker, etc.
+   **/
+  public Artifact id(String id) {
+    this.id = id;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "Artifact id. Examples are package location uri for tarball based apps, image name for docker, etc.")
+  @JsonProperty("id")
+  public String getId() {
+    return id;
+  }
+
+  public void setId(String id) {
+    this.id = id;
+  }
+
+  /**
+   * Artifact type, like docker, tarball, etc. (optional).
+   **/
+  public Artifact type(TypeEnum type) {
+    this.type = type;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact type, like docker, tarball, etc. (optional).")
+  @JsonProperty("type")
+  public TypeEnum getType() {
+    return type;
+  }
+
+  public void setType(TypeEnum type) {
+    this.type = type;
+  }
+
+  /**
+   * Artifact location to support multiple artifact stores (optional).
+   **/
+  public Artifact uri(String uri) {
+    this.uri = uri;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact location to support multiple artifact stores (optional).")
+  @JsonProperty("uri")
+  public String getUri() {
+    return uri;
+  }
+
+  public void setUri(String uri) {
+    this.uri = uri;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Artifact artifact = (Artifact) o;
+    return Objects.equals(this.id, artifact.id)
+        && Objects.equals(this.type, artifact.type)
+        && Objects.equals(this.uri, artifact.uri);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(id, type, uri);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Artifact {\n");
+
+    sb.append("    id: ").append(toIndentedString(id)).append("\n");
+    sb.append("    type: ").append(toIndentedString(type)).append("\n");
+    sb.append("    uri: ").append(toIndentedString(uri)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/BaseResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/BaseResource.java
new file mode 100644
index 0000000..a23c1fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/BaseResource.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import java.io.Serializable;
+
+public class BaseResource implements Serializable {
+  // Explicit UID keeps serialized instances compatible across releases.
+  private static final long serialVersionUID = 1492603053176889431L;
+
+  // REST resource location of this object; null until assigned.
+  private String uri;
+
+  /**
+   * Resource location, e.g.
+   * "/applications/helloworld/containers/container_e3751_1458061340047_0008_01_000002".
+   *
+   * @return the resource URI, or null if not set
+   **/
+  public String getUri() {
+    return uri;
+  }
+
+  public void setUri(String uri) {
+    this.uri = uri;
+  }
+
+  @Override
+  public String toString() {
+    // Simple single-line form, e.g. "BaseResource [uri=/applications/foo]".
+    StringBuilder builder = new StringBuilder();
+    builder.append("BaseResource [uri=");
+    builder.append(uri);
+    builder.append("]");
+    return builder.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
new file mode 100644
index 0000000..4f50564
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * One or more components of the application. If the application is HBase say,
+ * then the component can be a simple role like master or regionserver. If the
+ * application is a complex business webapp then a component can be other
+ * applications say Kafka or Storm. Thereby it opens up the support for complex
+ * and nested applications.
+ **/
+
+@ApiModel(description = "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+// Swagger-generated model. Fluent with-style mutators return `this` for
+// chaining; JSON field names are snake_case via @JsonProperty, and JAXB
+// uses matching @XmlElement names on the setters.
+public class Component implements Serializable {
+  // Explicit UID keeps serialized instances compatible across releases.
+  private static final long serialVersionUID = -8430058381509087805L;
+
+  private String name = null;
+  private List<String> dependencies = new ArrayList<String>();
+  private ReadinessCheck readinessCheck = null;
+  private Artifact artifact = null;
+  private String launchCommand = null;
+  private Resource resource = null;
+  private Long numberOfContainers = null;
+  private Boolean uniqueComponentSupport = null;
+  private Boolean runPrivilegedContainer = null;
+  private PlacementPolicy placementPolicy = null;
+  private Configuration configuration = null;
+  private List<String> quicklinks = new ArrayList<String>();
+
+  /**
+   * Name of the application component (mandatory).
+   **/
+  public Component name(String name) {
+    this.name = name;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "Name of the application component (mandatory).")
+  @JsonProperty("name")
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  /**
+   * An array of application components which should be in READY state (as
+   * defined by readiness check), before this component can be started. The
+   * dependencies across all components of an application should be represented
+   * as a DAG.
+   **/
+  public Component dependencies(List<String> dependencies) {
+    this.dependencies = dependencies;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "An array of application components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of an application should be represented as a DAG.")
+  @JsonProperty("dependencies")
+  // NOTE(review): returns the internal mutable list; callers can modify state.
+  public List<String> getDependencies() {
+    return dependencies;
+  }
+
+  public void setDependencies(List<String> dependencies) {
+    this.dependencies = dependencies;
+  }
+
+  /**
+   * Readiness check for this app-component.
+   **/
+  public Component readinessCheck(ReadinessCheck readinessCheck) {
+    this.readinessCheck = readinessCheck;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Readiness check for this app-component.")
+  @JsonProperty("readiness_check")
+  public ReadinessCheck getReadinessCheck() {
+    return readinessCheck;
+  }
+
+  @XmlElement(name = "readiness_check")
+  public void setReadinessCheck(ReadinessCheck readinessCheck) {
+    this.readinessCheck = readinessCheck;
+  }
+
+  /**
+   * Artifact of the component (optional). If not specified, the application
+   * level global artifact takes effect.
+   **/
+  public Component artifact(Artifact artifact) {
+    this.artifact = artifact;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact of the component (optional). If not specified, the application level global artifact takes effect.")
+  @JsonProperty("artifact")
+  public Artifact getArtifact() {
+    return artifact;
+  }
+
+  public void setArtifact(Artifact artifact) {
+    this.artifact = artifact;
+  }
+
+  /**
+   * The custom launch command of this component (optional). When specified at
+   * the component level, it overrides the value specified at the global level
+   * (if any).
+   **/
+  public Component launchCommand(String launchCommand) {
+    this.launchCommand = launchCommand;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The custom launch command of this component (optional). When specified at the component level, it overrides the value specified at the global level (if any).")
+  @JsonProperty("launch_command")
+  public String getLaunchCommand() {
+    return launchCommand;
+  }
+
+  @XmlElement(name = "launch_command")
+  public void setLaunchCommand(String launchCommand) {
+    this.launchCommand = launchCommand;
+  }
+
+  /**
+   * Resource of this component (optional). If not specified, the application
+   * level global resource takes effect.
+   **/
+  public Component resource(Resource resource) {
+    this.resource = resource;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Resource of this component (optional). If not specified, the application level global resource takes effect.")
+  @JsonProperty("resource")
+  public Resource getResource() {
+    return resource;
+  }
+
+  public void setResource(Resource resource) {
+    this.resource = resource;
+  }
+
+  /**
+   * Number of containers for this app-component (optional). If not specified,
+   * the application level global number_of_containers takes effect.
+   **/
+  public Component numberOfContainers(Long numberOfContainers) {
+    this.numberOfContainers = numberOfContainers;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Number of containers for this app-component (optional). If not specified, the application level global number_of_containers takes effect.")
+  @JsonProperty("number_of_containers")
+  public Long getNumberOfContainers() {
+    return numberOfContainers;
+  }
+
+  @XmlElement(name = "number_of_containers")
+  public void setNumberOfContainers(Long numberOfContainers) {
+    this.numberOfContainers = numberOfContainers;
+  }
+
+  /**
+   * Certain applications need to define multiple components using the same
+   * artifact and resource profile, differing only in configurations. In such
+   * cases, this field helps app owners to avoid creating multiple component
+   * definitions with repeated information. The number_of_containers field
+   * dictates the initial number of components created. Component names
+   * typically differ with a trailing id, but assumptions should not be made on
+   * that, as the algorithm can change at any time. Configurations section will
+   * be able to use placeholders like ${USER}, ${CLUSTER_NAME} and
+   * ${COMPONENT_NAME} to be replaced at runtime with user the app is submitted
+   * as, application name and application component name respectively. Launch
+   * command can use placeholders like ${APP_COMPONENT_NAME} and ${APP_NAME} to
+   * get its component name and app name respectively at runtime. The best part
+   * of this feature is that when the component is flexed up, entirely new
+   * components (with new trailing ids) are created.
+   **/
+  public Component uniqueComponentSupport(Boolean uniqueComponentSupport) {
+    this.uniqueComponentSupport = uniqueComponentSupport;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Certain applications need to define multiple components using the same artifact and resource profile, differing only in configurations. In such cases, this field helps app owners to avoid creating multiple component definitions with repeated information. The number_of_containers field dictates the initial number of components created. Component names typically differ with a trailing id, but assumptions should not be made on that, as the algorithm can change at any time. Configurations section will be able to use placeholders like ${USER}, ${CLUSTER_NAME} and ${COMPONENT_NAME} to be replaced at runtime with user the app is submitted as, application name and application component name respectively. Launch command can use placeholders like ${APP_COMPONENT_NAME} and ${APP_NAME} to get its component name and app name respectively at runtime. The best part of this feature is that when the component is flexed up, entirely new components (with 
 new trailing ids) are created.")
+  @JsonProperty("unique_component_support")
+  public Boolean getUniqueComponentSupport() {
+    return uniqueComponentSupport;
+  }
+
+  @XmlElement(name = "unique_component_support")
+  public void setUniqueComponentSupport(Boolean uniqueComponentSupport) {
+    this.uniqueComponentSupport = uniqueComponentSupport;
+  }
+
+  /**
+   * Run all containers of this component in privileged mode (YARN-4262).
+   **/
+  public Component runPrivilegedContainer(Boolean runPrivilegedContainer) {
+    this.runPrivilegedContainer = runPrivilegedContainer;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Run all containers of this component in privileged mode (YARN-4262).")
+  @JsonProperty("run_privileged_container")
+  public Boolean getRunPrivilegedContainer() {
+    return runPrivilegedContainer;
+  }
+
+  @XmlElement(name = "run_privileged_container")
+  public void setRunPrivilegedContainer(Boolean runPrivilegedContainer) {
+    this.runPrivilegedContainer = runPrivilegedContainer;
+  }
+
+  /**
+   * Advanced scheduling and placement policies for all containers of this
+   * component (optional). If not specified, the app level placement_policy
+   * takes effect. Refer to the description at the global level for more
+   * details.
+   **/
+  public Component placementPolicy(PlacementPolicy placementPolicy) {
+    this.placementPolicy = placementPolicy;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the app level placement_policy takes effect. Refer to the description at the global level for more details.")
+  @JsonProperty("placement_policy")
+  public PlacementPolicy getPlacementPolicy() {
+    return placementPolicy;
+  }
+
+  @XmlElement(name = "placement_policy")
+  public void setPlacementPolicy(PlacementPolicy placementPolicy) {
+    this.placementPolicy = placementPolicy;
+  }
+
+  /**
+   * Config properties for this app-component.
+   **/
+  public Component configuration(Configuration configuration) {
+    this.configuration = configuration;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Config properties for this app-component.")
+  @JsonProperty("configuration")
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+
+  public void setConfiguration(Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  /**
+   * A list of quicklink keys defined at the application level, and to be
+   * resolved by this component.
+   **/
+  public Component quicklinks(List<String> quicklinks) {
+    this.quicklinks = quicklinks;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A list of quicklink keys defined at the application level, and to be resolved by this component.")
+  @JsonProperty("quicklinks")
+  // NOTE(review): returns the internal mutable list; callers can modify state.
+  public List<String> getQuicklinks() {
+    return quicklinks;
+  }
+
+  public void setQuicklinks(List<String> quicklinks) {
+    this.quicklinks = quicklinks;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    // Strict class check (not instanceof): a subclass instance is never equal.
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Component component = (Component) o;
+    // Null-safe comparison over all twelve fields; keep in sync with hashCode().
+    return Objects.equals(this.name, component.name)
+        && Objects.equals(this.dependencies, component.dependencies)
+        && Objects.equals(this.readinessCheck, component.readinessCheck)
+        && Objects.equals(this.artifact, component.artifact)
+        && Objects.equals(this.launchCommand, component.launchCommand)
+        && Objects.equals(this.resource, component.resource)
+        && Objects.equals(this.numberOfContainers, component.numberOfContainers)
+        && Objects.equals(this.uniqueComponentSupport,
+            component.uniqueComponentSupport)
+        && Objects.equals(this.runPrivilegedContainer,
+            component.runPrivilegedContainer)
+        && Objects.equals(this.placementPolicy, component.placementPolicy)
+        && Objects.equals(this.configuration, component.configuration)
+        && Objects.equals(this.quicklinks, component.quicklinks);
+  }
+
+  @Override
+  public int hashCode() {
+    // Hashes exactly the fields compared in equals(), as the contract requires.
+    return Objects.hash(name, dependencies, readinessCheck, artifact,
+        launchCommand, resource, numberOfContainers, uniqueComponentSupport,
+        runPrivilegedContainer, placementPolicy, configuration, quicklinks);
+  }
+
+  @Override
+  public String toString() {
+    // Multi-line dump; nested values are re-indented by toIndentedString().
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Component {\n");
+
+    sb.append("    name: ").append(toIndentedString(name)).append("\n");
+    sb.append("    dependencies: ").append(toIndentedString(dependencies))
+        .append("\n");
+    sb.append("    readinessCheck: ").append(toIndentedString(readinessCheck))
+        .append("\n");
+    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
+    sb.append("    launchCommand: ").append(toIndentedString(launchCommand))
+        .append("\n");
+    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
+    sb.append("    numberOfContainers: ")
+        .append(toIndentedString(numberOfContainers)).append("\n");
+    sb.append("    uniqueComponentSupport: ")
+        .append(toIndentedString(uniqueComponentSupport)).append("\n");
+    sb.append("    runPrivilegedContainer: ")
+        .append(toIndentedString(runPrivilegedContainer)).append("\n");
+    sb.append("    placementPolicy: ").append(toIndentedString(placementPolicy))
+        .append("\n");
+    sb.append("    configuration: ").append(toIndentedString(configuration))
+        .append("\n");
+    sb.append("    quicklinks: ").append(toIndentedString(quicklinks))
+        .append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   *
+   * @param o the object to render; may be null
+   * @return the literal "null" for null input, otherwise o.toString() with
+   *         each embedded newline followed by four spaces
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
new file mode 100644
index 0000000..bad68c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+/**
+ * A config file that needs to be created and made available as a volume in an
+ * application component container.
+ **/
+
+@ApiModel(description = "A config file that needs to be created and made available as a volume in an application component container.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+// Swagger-generated model. Fluent with-style mutators return `this` for
+// chaining; JSON field names are snake_case via @JsonProperty, and JAXB
+// uses matching @XmlElement names on the setters.
+public class ConfigFile implements Serializable {
+  // Explicit UID keeps serialized instances compatible across releases.
+  private static final long serialVersionUID = -7009402089417704612L;
+
+  // Supported config-file formats; serialized as the display string below.
+  public enum TypeEnum {
+    XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE(
+        "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML");
+
+    private String value;
+
+    TypeEnum(String value) {
+      this.value = value;
+    }
+
+    // @JsonValue makes Jackson serialize the enum as this string value.
+    @Override
+    @JsonValue
+    public String toString() {
+      return value;
+    }
+  }
+
+  private TypeEnum type = null;
+  private String destFile = null;
+  private String srcFile = null;
+  // Free-form key/value blob; shape is format-dependent, hence Object.
+  private Object props = null;
+
+  /**
+   * Config file in the standard format like xml, properties, json, yaml,
+   * template.
+   **/
+  public ConfigFile type(TypeEnum type) {
+    this.type = type;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Config file in the standard format like xml, properties, json, yaml, template.")
+  @JsonProperty("type")
+  public TypeEnum getType() {
+    return type;
+  }
+
+  public void setType(TypeEnum type) {
+    this.type = type;
+  }
+
+  /**
+   * The absolute path that this configuration file should be mounted as, in the
+   * application container.
+   **/
+  public ConfigFile destFile(String destFile) {
+    this.destFile = destFile;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The absolute path that this configuration file should be mounted as, in the application container.")
+  @JsonProperty("dest_file")
+  public String getDestFile() {
+    return destFile;
+  }
+
+  @XmlElement(name = "dest_file")
+  public void setDestFile(String destFile) {
+    this.destFile = destFile;
+  }
+
+  /**
+   * Required for type template. This provides the source location of the
+   * template which needs to be mounted as dest_file post property
+   * substitutions. Typically the src_file would point to a source controlled
+   * network accessible file maintained by tools like puppet, chef, etc.
+   **/
+  public ConfigFile srcFile(String srcFile) {
+    this.srcFile = srcFile;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.")
+  @JsonProperty("src_file")
+  public String getSrcFile() {
+    return srcFile;
+  }
+
+  @XmlElement(name = "src_file")
+  public void setSrcFile(String srcFile) {
+    this.srcFile = srcFile;
+  }
+
+  /**
+   * A blob of key value pairs that will be dumped in the dest_file in the
+   * format as specified in type. If the type is template then the attribute
+   * src_file is mandatory and the src_file content is dumped to dest_file post
+   * property substitutions.
+   **/
+  public ConfigFile props(Object props) {
+    this.props = props;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.")
+  @JsonProperty("props")
+  public Object getProps() {
+    return props;
+  }
+
+  public void setProps(Object props) {
+    this.props = props;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    // Strict class check (not instanceof): a subclass instance is never equal.
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ConfigFile configFile = (ConfigFile) o;
+    // Null-safe field-by-field comparison; keep in sync with hashCode().
+    return Objects.equals(this.type, configFile.type)
+        && Objects.equals(this.destFile, configFile.destFile)
+        && Objects.equals(this.srcFile, configFile.srcFile)
+        && Objects.equals(this.props, configFile.props);
+  }
+
+  @Override
+  public int hashCode() {
+    // Hashes exactly the fields compared in equals(), as the contract requires.
+    return Objects.hash(type, destFile, srcFile, props);
+  }
+
+  @Override
+  public String toString() {
+    // Multi-line dump; nested values are re-indented by toIndentedString().
+    StringBuilder sb = new StringBuilder();
+    sb.append("class ConfigFile {\n");
+
+    sb.append("    type: ").append(toIndentedString(type)).append("\n");
+    sb.append("    destFile: ").append(toIndentedString(destFile)).append("\n");
+    sb.append("    srcFile: ").append(toIndentedString(srcFile)).append("\n");
+    sb.append("    props: ").append(toIndentedString(props)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   *
+   * @param o the object to render; may be null
+   * @return the literal "null" for null input, otherwise o.toString() with
+   *         each embedded newline followed by four spaces
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
new file mode 100644
index 0000000..c4f2ad4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Set of configuration properties that can be injected into the application
+ * components via envs, files and custom pluggable helper docker containers.
+ * Files of several standard formats like xml, properties, json, yaml and
+ * templates will be supported.
+ **/
+
+@ApiModel(description = "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@JsonInclude(JsonInclude.Include.NON_NULL)
+// Swagger-generated model. Fluent with-style mutators return `this` for
+// chaining; JSON field names come from the @JsonProperty annotations.
+public class Configuration implements Serializable {
+  // Explicit UID keeps serialized instances compatible across releases.
+  private static final long serialVersionUID = -4330788704981074466L;
+
+  private Map<String, String> properties = new HashMap<String, String>();
+  private Map<String, String> env = new HashMap<String, String>();
+  private List<ConfigFile> files = new ArrayList<ConfigFile>();
+
+  /**
+   * A blob of key-value pairs of common application properties.
+   **/
+  public Configuration properties(Map<String, String> properties) {
+    this.properties = properties;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A blob of key-value pairs of common application properties.")
+  @JsonProperty("properties")
+  // NOTE(review): returns the internal mutable map; callers can modify state.
+  public Map<String, String> getProperties() {
+    return properties;
+  }
+
+  public void setProperties(Map<String, String> properties) {
+    this.properties = properties;
+  }
+
+  /**
+   * A blob of key-value pairs which will be appended to the default system
+   * properties and handed off to the application at start time. All placeholder
+   * references to properties will be substituted before injection.
+   **/
+  public Configuration env(Map<String, String> env) {
+    this.env = env;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A blob of key-value pairs which will be appended to the default system properties and handed off to the application at start time. All placeholder references to properties will be substituted before injection.")
+  @JsonProperty("env")
+  public Map<String, String> getEnv() {
+    return env;
+  }
+
+  public void setEnv(Map<String, String> env) {
+    this.env = env;
+  }
+
+  /**
+   * Array of list of files that needs to be created and made available as
+   * volumes in the application component containers.
+   **/
+  public Configuration files(List<ConfigFile> files) {
+    this.files = files;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Array of list of files that needs to be created and made available as volumes in the application component containers.")
+  @JsonProperty("files")
+  public List<ConfigFile> getFiles() {
+    return files;
+  }
+
+  public void setFiles(List<ConfigFile> files) {
+    this.files = files;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    // Strict class check (not instanceof): a subclass instance is never equal.
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Configuration configuration = (Configuration) o;
+    // Null-safe field-by-field comparison; keep in sync with hashCode().
+    return Objects.equals(this.properties, configuration.properties)
+        && Objects.equals(this.env, configuration.env)
+        && Objects.equals(this.files, configuration.files);
+  }
+
+  @Override
+  public int hashCode() {
+    // Hashes exactly the fields compared in equals(), as the contract requires.
+    return Objects.hash(properties, env, files);
+  }
+
+  @Override
+  public String toString() {
+    // Multi-line dump; nested values are re-indented by toIndentedString().
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Configuration {\n");
+
+    sb.append("    properties: ").append(toIndentedString(properties))
+        .append("\n");
+    sb.append("    env: ").append(toIndentedString(env)).append("\n");
+    sb.append("    files: ").append(toIndentedString(files)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   *
+   * @param o the object to render; may be null
+   * @return the literal "null" for null input, otherwise o.toString() with
+   *         each embedded newline followed by four spaces
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] [abbrv] hadoop git commit: YARN-6804. [yarn-native-services changes] Allow custom hostname for docker containers in native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6804. [yarn-native-services changes] Allow custom hostname for docker containers in native services. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc1dd1e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc1dd1e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc1dd1e5

Branch: refs/heads/yarn-native-services
Commit: cc1dd1e5f5f33190f51d84c088a08818f415884c
Parents: 36505c8
Author: Jian He <ji...@apache.org>
Authored: Fri Jul 21 14:49:42 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:27 2017 -0700

----------------------------------------------------------------------
 .../slider/core/launch/AbstractLauncher.java    |  7 ++++
 .../providers/AbstractProviderService.java      |  7 ++--
 .../providers/DefaultProviderService.java       |  9 +++--
 .../apache/slider/providers/ProviderUtils.java  |  3 +-
 .../providers/docker/DockerProviderService.java | 23 ++++++++++-
 .../tarball/TarballProviderService.java         | 10 +++--
 .../server/appmaster/state/RoleInstance.java    |  1 +
 .../server/dns/BaseServiceRecordProcessor.java  |  1 -
 .../dns/ContainerServiceRecordProcessor.java    |  9 ++++-
 .../registry/server/dns/TestRegistryDNS.java    | 40 ++++++++++----------
 .../runtime/DockerLinuxContainerRuntime.java    |  6 +++
 11 files changed, 79 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
index 965ea35..8f61bf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
@@ -81,6 +81,7 @@ public abstract class AbstractLauncher extends Configured {
   protected boolean yarnDockerMode = false;
   protected String dockerImage;
   protected String dockerNetwork = DEFAULT_DOCKER_NETWORK;
+  protected String dockerHostname;
   protected String yarnContainerMountPoints;
   protected String runPrivilegedContainer;
 
@@ -236,6 +237,8 @@ public abstract class AbstractLauncher extends Configured {
       env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
       env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage);
       env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", dockerNetwork);
+      env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME",
+          dockerHostname);
       env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER", runPrivilegedContainer);
       StringBuilder sb = new StringBuilder();
       for (Entry<String,String> mount : mountPaths.entrySet()) {
@@ -469,6 +472,10 @@ public abstract class AbstractLauncher extends Configured {
     this.dockerNetwork = dockerNetwork;
   }
 
+  public void setDockerHostname(String dockerHostname) {
+    this.dockerHostname = dockerHostname;
+  }
+
   public void setYarnContainerMountPoints(String yarnContainerMountPoints) {
     this.yarnContainerMountPoints = yarnContainerMountPoints;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
index b7fa802..e0d9402 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
@@ -60,8 +60,9 @@ public abstract class AbstractProviderService extends AbstractService
     super(name);
   }
 
-  public abstract void processArtifact(ContainerLauncher launcher, Component
-      component, SliderFileSystem fileSystem) throws IOException;
+  public abstract void processArtifact(ContainerLauncher launcher,
+      Application application, RoleInstance roleInstance,
+      SliderFileSystem fileSystem) throws IOException;
 
   @Override
   public void setAMState(StateAccessForProviders stateAccessor) {
@@ -78,7 +79,7 @@ public abstract class AbstractProviderService extends AbstractService
       SliderFileSystem fileSystem, RoleInstance roleInstance)
       throws IOException, SliderException {
     Component component = providerRole.component;
-    processArtifact(launcher, component, fileSystem);
+    processArtifact(launcher, application, roleInstance, fileSystem);
 
     // Generate tokens (key-value pair) for config substitution.
     // Get pre-defined tokens

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
index 7f7d209..8d2725c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
@@ -17,10 +17,10 @@
  */
 package org.apache.slider.providers;
 
-import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.launch.ContainerLauncher;
-import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
 import java.io.IOException;
 
@@ -31,7 +31,8 @@ public class DefaultProviderService extends AbstractProviderService {
   }
 
   @Override
-  public void processArtifact(ContainerLauncher launcher, Component
-      component, SliderFileSystem fileSystem) throws IOException {
+  public void processArtifact(ContainerLauncher launcher, Application
+      application, RoleInstance roleInstance, SliderFileSystem fileSystem)
+      throws IOException {
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index 0da535e..ecc521f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -504,8 +504,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
         // create and publish updated service record (including hostname & ip)
         ServiceRecord record = new ServiceRecord();
         record.set(YarnRegistryAttributes.YARN_ID, containerId);
-        String componentInstanceName = role.getCompInstanceName();
-        record.description = componentInstanceName.replaceAll("_", "-");
+        record.description = role.getCompInstanceName();
         record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
             PersistencePolicies.CONTAINER);
         // TODO: use constants from YarnRegistryAttributes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index f35d4d1..73783af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -17,14 +17,19 @@
  */
 package org.apache.slider.providers.docker;
 
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.text.MessageFormat;
 
 public class DockerProviderService extends AbstractProviderService
     implements DockerKeys {
@@ -36,12 +41,26 @@ public class DockerProviderService extends AbstractProviderService
     super(DockerProviderService.class.getSimpleName());
   }
 
-  public void processArtifact(ContainerLauncher launcher, Component
-      component, SliderFileSystem fileSystem) throws IOException {
+  public void processArtifact(ContainerLauncher launcher, Application
+      application, RoleInstance roleInstance, SliderFileSystem fileSystem)
+      throws IOException {
+    Component component = roleInstance.providerRole.component;
     launcher.setYarnDockerMode(true);
     launcher.setDockerImage(component.getArtifact().getId());
     launcher.setDockerNetwork(component.getConfiguration()
         .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK));
+    String domain = getConfig().get(RegistryConstants.KEY_DNS_DOMAIN);
+    String hostname;
+    if (domain == null || domain.isEmpty()) {
+      hostname = MessageFormat.format("{0}.{1}.{2}", roleInstance
+          .getCompInstanceName(), application.getName(), RegistryUtils
+          .currentUser());
+    } else {
+      hostname = MessageFormat.format("{0}.{1}.{2}.{3}", roleInstance
+          .getCompInstanceName(), application.getName(), RegistryUtils
+          .currentUser(), domain);
+    }
+    launcher.setDockerHostname(hostname);
     launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
index 9dd3499..8be63fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
@@ -20,10 +20,12 @@ package org.apache.slider.providers.tarball;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
 import java.io.IOException;
 
@@ -34,9 +36,11 @@ public class TarballProviderService extends AbstractProviderService {
   }
 
   @Override
-  public void processArtifact(ContainerLauncher launcher, Component
-      component, SliderFileSystem fileSystem) throws IOException {
-    Path artifact =  new Path(component.getArtifact().getId());
+  public void processArtifact(ContainerLauncher launcher, Application
+      application, RoleInstance roleInstance, SliderFileSystem fileSystem)
+      throws IOException {
+    Path artifact = new Path(roleInstance.providerRole.component
+        .getArtifact().getId());
     if (!fileSystem.isFile(artifact)) {
       throw new IOException("Package doesn't exist as a resource: " +
           artifact.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
index 9ac26b5..5619492 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
@@ -125,6 +125,7 @@ public final class RoleInstance implements Cloneable {
     } else {
       compInstanceName = role.name;
     }
+    compInstanceName = compInstanceName.toLowerCase().replaceAll("_", "-");
     this.providerRole = role;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
index 1289fb3..2fe3a6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java
@@ -285,7 +285,6 @@ public abstract class BaseServiceRecordProcessor
      */
     protected Name getContainerIDName() throws TextParseException {
       String containerID = RegistryPathUtils.lastPathEntry(getPath());
-      containerID = containerID.replace("container", "ctr");
       return Name.fromString(String.format("%s.%s", containerID, domain));
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
index 75873d7..2e95f54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.registry.server.dns;
 
+import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
 import org.xbill.DNS.Name;
@@ -156,9 +157,11 @@ public class ContainerServiceRecordProcessor extends
      */
     @Override protected void init(ServiceRecord serviceRecord) {
       try {
-        this.setNames(new Name[] {getContainerIDName()});
+        this.setNames(new Name[] {getContainerName()});
       } catch (TextParseException e) {
         // log
+      } catch (PathNotFoundException e) {
+        // log
       }
       List<String> txts = new ArrayList<>();
       txts.add("id=" + serviceRecord.get(YarnRegistryAttributes.YARN_ID));
@@ -200,9 +203,11 @@ public class ContainerServiceRecordProcessor extends
       }
       this.setNames(new Name[] {reverseLookupName});
       try {
-        this.setTarget(getContainerIDName());
+        this.setTarget(getContainerName());
       } catch (TextParseException e) {
         //LOG
+      } catch (PathNotFoundException e) {
+        //LOG
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
index fcb602c..cc839cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -111,7 +111,7 @@ public class TestRegistryDNS extends Assert {
       + "}\n";
   static final String CONTAINER_RECORD = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"YCLOUD\",\n"
+      + "  \"description\" : \"COMP-NAME\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
@@ -122,7 +122,7 @@ public class TestRegistryDNS extends Assert {
 
   private static final String CONTAINER_RECORD_NO_IP = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"YCLOUD\",\n"
+      + "  \"description\" : \"COMP-NAME\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
@@ -131,7 +131,7 @@ public class TestRegistryDNS extends Assert {
 
   private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n"
       + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"YCLOUD\",\n"
+      + "  \"description\" : \"COMP-NAME\",\n"
       + "  \"external\" : [ ],\n"
       + "  \"internal\" : [ ],\n"
       + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
@@ -216,7 +216,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -225,7 +225,7 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((ARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("ycloud.test1.root.hwx.test.", 1);
+    recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
   }
 
@@ -235,7 +235,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT.getBytes());
     registryDNS.register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000003",
+            + "ctr-e50-1451931954322-0016-01-000003",
          record);
 
     Name name =
@@ -254,7 +254,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -264,7 +264,7 @@ public class TestRegistryDNS extends Assert {
         ((ARecord) recs[0]).getAddress().getHostAddress());
     assertEquals("wrong ttl", 30L, recs[0].getTTL());
 
-    recs = assertDNSQuery("ycloud.test1.root.hwx.test.", 1);
+    recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
 
     assertEquals("wrong ttl", 30L, recs[0].getTTL());
@@ -276,13 +276,13 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
     Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "ctr-e50-1451931954322-0016-01-000002.hwx.test.",
+        "comp-name.test1.root.hwx.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
   }
 
@@ -302,13 +302,13 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
     Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "ctr-e50-1451931954322-0016-01-000002.hwx.test.",
+        "comp-name.test1.root.hwx.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
   }
 
@@ -318,7 +318,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -339,7 +339,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD_NO_IP.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -453,7 +453,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -462,7 +462,7 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((AAAARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("ycloud.test1.root.hwx.test.", Type.AAAA, 1);
+    recs = assertDNSQuery("comp-name.test1.root.hwx.test.", Type.AAAA, 1);
     assertTrue("not an ARecord", recs[0] instanceof AAAARecord);
   }
 
@@ -472,7 +472,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -528,7 +528,7 @@ public class TestRegistryDNS extends Assert {
         CONTAINER_RECORD.getBytes());
     getRegistryDNS().register(
         "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "container-e50-1451931954322-0016-01-000002",
+            + "ctr-e50-1451931954322-0016-01-000002",
         record);
 
     // start assessing whether correct records are available
@@ -537,13 +537,13 @@ public class TestRegistryDNS extends Assert {
     assertEquals("wrong result", "172.17.0.19",
         ((ARecord) recs[0]).getAddress().getHostAddress());
 
-    recs = assertDNSQuery("ycloud.test1.root.hwx.test.", 1);
+    recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1);
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
 
     // lookup dyanmic reverse records
     recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
     assertEquals("wrong result",
-        "ctr-e50-1451931954322-0016-01-000002.hwx.test.",
+        "comp-name.test1.root.hwx.test.",
         ((PTRRecord) recs[0]).getTarget().toString());
 
     // now lookup static reverse records

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1dd1e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 57dadb2..df89d63 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -295,6 +296,11 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     if (name == null || name.isEmpty()) {
       name = RegistryPathUtils.encodeYarnID(containerIdStr);
+
+      String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN);
+      if (domain != null) {
+        name += ("." + domain);
+      }
       validateHostname(name);
     }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: YARN-6160. Create an agent-less docker-less provider in the native services framework. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6160. Create an agent-less docker-less provider in the native services framework. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62ceedf2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62ceedf2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62ceedf2

Branch: refs/heads/yarn-native-services
Commit: 62ceedf2975ea6bce11f74bfe4ee14b8a0e84feb
Parents: 427835d
Author: Jian He <ji...@apache.org>
Authored: Wed May 17 09:58:32 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../org/apache/slider/client/SliderClient.java  |  34 ++--
 .../apache/slider/client/SliderClientAPI.java   |  10 --
 .../org/apache/slider/common/SliderKeys.java    |  18 +-
 .../slider/common/tools/ConfigHelper.java       |  39 -----
 .../providers/AbstractClientProvider.java       |  42 +++--
 .../providers/AbstractProviderService.java      | 169 +++++++++++++++++++
 .../slider/providers/DefaultClientProvider.java |  45 +++++
 .../providers/DefaultProviderFactory.java       |  47 ++++++
 .../providers/DefaultProviderService.java       |  37 ++++
 .../slider/providers/SliderProviderFactory.java | 105 ++++--------
 .../slider/providers/agent/AgentKeys.java       | 102 -----------
 .../providers/docker/DockerClientProvider.java  |  38 ++---
 .../slider/providers/docker/DockerKeys.java     |   3 -
 .../providers/docker/DockerProviderFactory.java |  21 ++-
 .../providers/docker/DockerProviderService.java | 140 +--------------
 .../tarball/TarballClientProvider.java          |  65 +++++++
 .../tarball/TarballProviderFactory.java         |  52 ++++++
 .../tarball/TarballProviderService.java         |  50 ++++++
 .../server/appmaster/RoleLaunchService.java     |  19 +--
 .../server/appmaster/SliderAppMaster.java       |  64 +++----
 .../slider/server/appmaster/web/WebAppApi.java  |   6 -
 .../server/appmaster/web/WebAppApiImpl.java     |  11 +-
 .../appmaster/web/view/SliderHamletBlock.java   |   3 -
 .../main/resources/org/apache/slider/slider.xml |  30 ----
 .../slider/providers/TestProviderFactory.java   |  40 +++--
 .../web/view/TestClusterSpecificationBlock.java |   4 -
 .../web/view/TestContainerStatsBlock.java       |   4 -
 .../appmaster/web/view/TestIndexBlock.java      |   4 -
 28 files changed, 664 insertions(+), 538 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 83b4841..32d78b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -133,7 +133,6 @@ import org.apache.slider.core.zk.BlockingZKWatcher;
 import org.apache.slider.core.zk.ZKIntegration;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderUtils;
-import org.apache.slider.providers.SliderProviderFactory;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
 import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
@@ -257,8 +256,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     config = super.bindArgs(config, args);
     serviceArgs = new ClientArgs(args);
     serviceArgs.parse();
-    // add the slider XML config
-    ConfigHelper.injectSliderXMLResource();
     // yarn-ify
     YarnConfiguration yarnConfiguration = new YarnConfiguration(config);
     return patchConfiguration(yarnConfiguration);
@@ -622,14 +619,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return EXIT_SUCCESS;
   }
 
-  @Override
-  public AbstractClientProvider createClientProvider(String provider)
-    throws SliderException {
-    SliderProviderFactory factory =
-      SliderProviderFactory.createSliderProviderFactory(provider);
-    return factory.createClientProvider();
-  }
-
   private Application getApplicationFromArgs(String clusterName,
       AbstractClusterBuildingActionArgs args) throws IOException {
     File file = args.getAppDef();
@@ -893,7 +882,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   private void persistApp(Path appDir, Application application)
       throws IOException, SliderException {
-    FsPermission appDirPermission = new FsPermission("777");
+    FsPermission appDirPermission = new FsPermission("750");
     sliderFileSystem.createWithPermissions(appDir, appDirPermission);
     Path appJson = new Path(appDir, application.getName() + ".json");
     jsonSerDeser
@@ -1190,17 +1179,18 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       }
     }
 
+    // TODO handle client install
     // Only INSTALL is supported
-    AbstractClientProvider
-        provider = createClientProvider(SliderProviderFactory.DEFAULT_CLUSTER_TYPE);
-    provider.processClientOperation(sliderFileSystem,
-        getRegistryOperations(),
-        getConfig(),
-        "INSTALL",
-        clientInfo.installLocation,
-        pkgFile,
-        config,
-        clientInfo.name);
+    //    ClientProvider
+    //        provider = createClientProvider(SliderProviderFactory.DEFAULT_CLUSTER_TYPE);
+    //    provider.processClientOperation(sliderFileSystem,
+    //        getRegistryOperations(),
+    //        getConfig(),
+    //        "INSTALL",
+    //        clientInfo.installLocation,
+    //        pkgFile,
+    //        config,
+    //        clientInfo.name);
     return EXIT_SUCCESS;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
index 197a564..7477c05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
@@ -42,7 +42,6 @@ import org.apache.slider.common.params.ActionThawArgs;
 import org.apache.slider.common.params.ActionUpgradeArgs;
 import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.providers.AbstractClientProvider;
 
 import java.io.IOException;
 
@@ -63,15 +62,6 @@ public interface SliderClientAPI extends Service {
       ActionAMSuicideArgs args) throws YarnException, IOException;
 
   /**
-   * Get the provider for this cluster
-   * @param provider the name of the provider
-   * @return the provider instance
-   * @throws SliderException problems building the provider
-   */
-  AbstractClientProvider createClientProvider(String provider)
-    throws SliderException;
-
-  /**
    * Manage keytabs leveraged by slider
    *
    * @param keytabInfo the arguments needed to manage the keytab

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
index 968a90b..734fec5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -18,8 +18,6 @@
 
 package org.apache.slider.common;
 
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -158,12 +156,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
    */
   String HISTORY_FILENAME_GLOB_PATTERN = HISTORY_FILENAME_PREFIX +"*."+
                                     HISTORY_FILENAME_SUFFIX;
-  /**
-   * XML resource listing the standard Slider providers
-   * {@value}
-   */
-  String SLIDER_XML = "org/apache/slider/slider.xml";
-  
+
   String CLUSTER_DIRECTORY = "cluster";
 
   String PACKAGE_DIRECTORY = "package";
@@ -311,5 +304,12 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String APP_RESOURCES = "application.resources";
   String APP_RESOURCES_DIR = "app/resources";
 
-  String APP_PACKAGES_DIR = "app/packages";
+  String APP_INSTALL_DIR = "app/install";
+
+  String OUT_FILE = "stdout.txt";
+  String ERR_FILE = "stderr.txt";
+
+  String QUICK_LINKS = "quicklinks";
+
+  String KEY_CONTAINER_LAUNCH_DELAY = "container.launch.delay.sec";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
index 0e94a29..25debdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
@@ -50,7 +50,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * Methods to aid in config, both in the Configuration class and
@@ -62,11 +61,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 public class ConfigHelper {
   private static final Logger log = LoggerFactory.getLogger(ConfigHelper.class);
 
-  private static AtomicBoolean sliderResourceInjected =
-      new AtomicBoolean(false);
-  private static AtomicBoolean sliderResourceInjectionAttempted =
-      new AtomicBoolean(false);
-  
   /**
    * Dump the (sorted) configuration
    * @param conf config
@@ -614,37 +608,4 @@ public class ConfigHelper {
   public static void registerDeprecatedConfigItems() {
   }
 
-  /**
-   * Load a configuration with the {@link SliderKeys#SLIDER_XML} resource
-   * included
-   * @return a configuration instance
-   */
-  public static Configuration loadSliderConfiguration() {
-    Configuration conf = new Configuration();
-    conf.addResource(SliderKeys.SLIDER_XML);
-    return conf;
-  }
-
-  /**
-   * Inject the {@link SliderKeys#SLIDER_XML} resource
-   * into the configuration resources <i>of all configurations</i>.
-   * <p>
-   *   This operation is idempotent.
-   * <p>
-   * If the resource is not on the classpath, downgrades, rather than
-   * fails.
-   * @return true if the resource was found and loaded.
-   */
-  public static synchronized boolean injectSliderXMLResource() {
-    if (sliderResourceInjectionAttempted.getAndSet(true)) {
-      return sliderResourceInjected.get();
-    }
-    URL resourceUrl = getResourceUrl(SliderKeys.SLIDER_XML);
-    if (resourceUrl != null) {
-      Configuration.addDefaultResource(SliderKeys.SLIDER_XML);
-      sliderResourceInjected.set(true);
-    }
-    return sliderResourceInjected.get();
-  }
-  
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
index df174f5..185dcd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
@@ -19,32 +19,26 @@
 package org.apache.slider.providers;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.core.exceptions.SliderException;
 import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-public abstract class AbstractClientProvider extends Configured {
-  private static final Logger log =
-    LoggerFactory.getLogger(AbstractClientProvider.class);
+public abstract class AbstractClientProvider {
 
-  public AbstractClientProvider(Configuration conf) {
-    super(conf);
+  public AbstractClientProvider() {
   }
 
-  public abstract String getName();
-
-  public abstract List<ProviderRole> getRoles();
-
   /**
    * Generates a fixed format of application tags given one or more of
    * application name, version and description. This allows subsequent query for
@@ -70,7 +64,29 @@ public abstract class AbstractClientProvider extends Configured {
   }
 
   /**
-   * Process client operations for applications such as install, configure
+   * Validate the artifact.
+   * @param artifact
+   */
+  public abstract void validateArtifact(Artifact artifact, FileSystem
+      fileSystem) throws IOException;
+
+  protected abstract void validateConfigFile(ConfigFile configFile, FileSystem
+      fileSystem) throws IOException;
+
+  /**
+   * Validate the config files.
+   * @param configFiles config file list
+   * @param fileSystem file system
+   */
+  public void validateConfigFiles(List<ConfigFile> configFiles, FileSystem
+      fileSystem) throws IOException {
+    for (ConfigFile configFile : configFiles) {
+      validateConfigFile(configFile, fileSystem);
+    }
+  }
+
+  /**
+   * Process client operations for applications such as install, configure.
    * @param fileSystem
    * @param registryOperations
    * @param configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
new file mode 100644
index 0000000..b7fa802
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractProviderService.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ContainerState;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.CommandLineBuilder;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
+import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.slider.util.ServiceApiUtil.$;
+
+public abstract class AbstractProviderService extends AbstractService
+    implements ProviderService, SliderKeys {
+
+  protected static final Logger log =
+      LoggerFactory.getLogger(AbstractProviderService.class);
+  private static final ProviderUtils providerUtils = new ProviderUtils(log);
+  protected StateAccessForProviders amState;
+  protected YarnRegistryViewForProviders yarnRegistry;
+  private ServiceTimelinePublisher serviceTimelinePublisher;
+
+  protected AbstractProviderService(String name) {
+    super(name);
+  }
+
+  public abstract void processArtifact(ContainerLauncher launcher, Component
+      component, SliderFileSystem fileSystem) throws IOException;
+
+  @Override
+  public void setAMState(StateAccessForProviders stateAccessor) {
+    this.amState = stateAccessor;
+  }
+
+  @Override
+  public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
+    this.yarnRegistry = yarnRegistry;
+  }
+
+  public void buildContainerLaunchContext(ContainerLauncher launcher,
+      Application application, Container container, ProviderRole providerRole,
+      SliderFileSystem fileSystem, RoleInstance roleInstance)
+      throws IOException, SliderException {
+    Component component = providerRole.component;
+    processArtifact(launcher, component, fileSystem);
+
+    // Generate tokens (key-value pair) for config substitution.
+    // Get pre-defined tokens
+    Map<String, String> globalTokens = amState.getGlobalSubstitutionTokens();
+    Map<String, String> tokensForSubstitution = providerUtils
+        .initCompTokensForSubstitute(roleInstance);
+    tokensForSubstitution.putAll(globalTokens);
+    // Set the environment variables in launcher
+    launcher.putEnv(SliderUtils
+        .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
+    launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
+    launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+    if (System.getenv(HADOOP_USER_NAME) != null) {
+      launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
+    }
+    launcher.setEnv("LANG", "en_US.UTF-8");
+    launcher.setEnv("LC_ALL", "en_US.UTF-8");
+    launcher.setEnv("LANGUAGE", "en_US.UTF-8");
+
+    for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
+      tokensForSubstitution.put($(entry.getKey()), entry.getValue());
+    }
+    providerUtils.addComponentHostTokens(tokensForSubstitution, amState);
+
+    // create config file on hdfs and add local resource
+    providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
+        component, tokensForSubstitution, roleInstance, amState);
+
+    // substitute launch command
+    String launchCommand = ProviderUtils
+        .substituteStrWithTokens(component.getLaunchCommand(),
+            tokensForSubstitution);
+    CommandLineBuilder operation = new CommandLineBuilder();
+    operation.add(launchCommand);
+    operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
+    launcher.addCommand(operation.build());
+
+    // publish exports
+    providerUtils
+        .substituteMapWithTokens(application.getQuicklinks(), tokensForSubstitution);
+    PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
+        application.getQuicklinks().entrySet());
+    amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);
+    if (serviceTimelinePublisher != null) {
+      serviceTimelinePublisher.serviceAttemptUpdated(application);
+    }
+  }
+
+  public boolean processContainerStatus(ContainerId containerId,
+      ContainerStatus status) {
+    log.debug("Handling container status: {}", status);
+    if (SliderUtils.isEmpty(status.getIPs()) ||
+        SliderUtils.isUnset(status.getHost())) {
+      return true;
+    }
+    RoleInstance instance = amState.getOwnedContainer(containerId);
+    if (instance == null) {
+      // container is completed?
+      return false;
+    }
+
+    try {
+      providerUtils.updateServiceRecord(amState, yarnRegistry,
+          containerId.toString(), instance.role, status.getIPs(), status.getHost());
+    } catch (IOException e) {
+      // could not write service record to ZK, log and retry
+      log.warn("Error updating container {} service record in registry, " +
+          "retrying", containerId, e);
+      return true;
+    }
+    // TODO publish ip and host
+    org.apache.slider.api.resource.Container container =
+        instance.providerRole.component.getContainer(containerId.toString());
+    if (container != null) {
+      container.setIp(StringUtils.join(",", status.getIPs()));
+      container.setHostname(status.getHost());
+      container.setState(ContainerState.READY);
+    } else {
+      log.warn(containerId + " not found in Application!");
+    }
+    return false;
+  }
+
+  @Override
+  public void setServiceTimelinePublisher(ServiceTimelinePublisher publisher) {
+    this.serviceTimelinePublisher = publisher;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultClientProvider.java
new file mode 100644
index 0000000..004e81f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultClientProvider.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.ConfigFile;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+
+public class DefaultClientProvider extends AbstractClientProvider {
+
+  public DefaultClientProvider() {
+  }
+
+  @Override
+  public void validateArtifact(Artifact artifact, FileSystem fileSystem) {
+  }
+
+  @Override
+  protected void validateConfigFile(ConfigFile configFile, FileSystem
+      fileSystem) throws IOException {
+    // validate dest_file is not absolute
+    if (Paths.get(configFile.getDestFile()).isAbsolute()) {
+      throw new IllegalArgumentException(
+          "Dest_file must not be absolute path: " + configFile.getDestFile());
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderFactory.java
new file mode 100644
index 0000000..09a1423
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+public final class DefaultProviderFactory extends SliderProviderFactory {
+  private static final SliderProviderFactory FACTORY = new
+      DefaultProviderFactory();
+
+  private DefaultProviderFactory() {}
+
+  private static class Client {
+    static final AbstractClientProvider PROVIDER = new DefaultClientProvider();
+  }
+
+  private static class Server {
+    static final ProviderService PROVIDER = new DefaultProviderService();
+  }
+
+  @Override
+  public AbstractClientProvider createClientProvider() {
+    return Client.PROVIDER;
+  }
+
+  @Override
+  public ProviderService createServerProvider() {
+    return Server.PROVIDER;
+  }
+
+  public static SliderProviderFactory getInstance() {
+    return FACTORY;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
new file mode 100644
index 0000000..7f7d209
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/DefaultProviderService.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers;
+
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.providers.AbstractProviderService;
+
+import java.io.IOException;
+
+public class DefaultProviderService extends AbstractProviderService {
+
+  protected DefaultProviderService() {
+    super(DefaultProviderService.class.getSimpleName());
+  }
+
+  @Override
+  public void processArtifact(ContainerLauncher launcher, Component
+      component, SliderFileSystem fileSystem) throws IOException {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
index 5dd4a32..9c52643 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
@@ -18,93 +18,60 @@
 
 package org.apache.slider.providers;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.SliderXmlConfKeys;
-import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.providers.agent.AgentKeys;
+import org.apache.slider.providers.docker.DockerProviderFactory;
+import org.apache.slider.providers.tarball.TarballProviderFactory;
+import org.apache.slider.util.RestApiErrorMessages;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Base class for factories
+ * Base class for factories.
  */
-public abstract class SliderProviderFactory extends Configured {
-
-  public static final String DEFAULT_CLUSTER_TYPE = AgentKeys.PROVIDER_AGENT;
-  
+public abstract class SliderProviderFactory {
   protected static final Logger log =
-    LoggerFactory.getLogger(SliderProviderFactory.class);
-  public static final String PROVIDER_NOT_FOUND =
-    "Unable to find provider of application type %s";
-
-  public SliderProviderFactory(Configuration conf) {
-    super(conf);
-  }
+      LoggerFactory.getLogger(SliderProviderFactory.class);
 
-  protected SliderProviderFactory() {
-  }
+  protected SliderProviderFactory() {}
 
   public abstract AbstractClientProvider createClientProvider();
 
   public abstract ProviderService createServerProvider();
 
+  public static synchronized ProviderService getProviderService(Artifact
+      artifact) {
+    return createSliderProviderFactory(artifact).createServerProvider();
+  }
+
+  public static synchronized AbstractClientProvider getClientProvider(Artifact
+      artifact) {
+    return createSliderProviderFactory(artifact).createClientProvider();
+  }
+
   /**
    * Create a provider for a specific application
-   * @param application app
-   * @return app instance
+   * @param artifact artifact
+   * @return provider factory
    * @throws SliderException on any instantiation problem
    */
-  public static SliderProviderFactory createSliderProviderFactory(String application) throws
-      SliderException {
-    Configuration conf = loadSliderConfiguration();
-    if (application == null) {
-      application = DEFAULT_CLUSTER_TYPE;
+  public static synchronized SliderProviderFactory createSliderProviderFactory(
+      Artifact artifact) {
+    if (artifact == null || artifact.getType() == null) {
+      log.info("Loading service provider type default");
+      return DefaultProviderFactory.getInstance();
     }
-    String providerKey =
-      String.format(SliderXmlConfKeys.KEY_PROVIDER, application);
-    if (application.contains(".")) {
-      log.debug("Treating {} as a classname", application);
-      String name = "classname.key";
-      conf.set(name, application);
-      providerKey = name;
+    log.info("Loading service provider type {}", artifact.getType());
+    switch (artifact.getType()) {
+      // TODO add handling for custom types?
+      // TODO handle application
+      case DOCKER:
+        return DockerProviderFactory.getInstance();
+      case TARBALL:
+        return TarballProviderFactory.getInstance();
+      default:
+        throw new IllegalArgumentException(
+            RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
     }
-    
-    Class<? extends SliderProviderFactory> providerClass;
-    try {
-      providerClass = conf.getClass(providerKey, null, SliderProviderFactory.class);
-    } catch (RuntimeException e) {
-      throw new BadClusterStateException(e, "Failed to load provider %s: %s", application, e);
-    }
-    if (providerClass == null) {
-      throw new BadClusterStateException(PROVIDER_NOT_FOUND, application);
-    }
-
-    Exception ex;
-    try {
-      SliderProviderFactory providerFactory = providerClass.newInstance();
-      providerFactory.setConf(conf);
-      return providerFactory;
-    } catch (Exception e) {
-      ex = e;
-    }
-    //by here the operation failed and ex is set to the value 
-    throw new BadClusterStateException(ex,
-                              "Failed to create an instance of %s : %s",
-                              providerClass,
-                              ex);
-  }
-
-  /**
-   * Load a configuration with the {@link SliderKeys#SLIDER_XML} resource
-   * included
-   * @return a configuration instance
-   */
-  public static Configuration loadSliderConfiguration() {
-    Configuration conf = new Configuration();
-    conf.addResource(SliderKeys.SLIDER_XML);
-    return conf;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
deleted file mode 100644
index c4228e4..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentKeys.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.slider.providers.agent;
-
-/*
-
- */
-public interface AgentKeys {
-
-  String AGENT_TAR = "slider-agent.tar.gz";
-  String PROVIDER_AGENT = "agent";
-  String ROLE_NODE = "echo";
-
-  /**
-   * Template stored in the slider classpath -to use if there is
-   * no site-specific template
-   * {@value}
-   */
-  String CONF_RESOURCE = "org/apache/slider/providers/agent/conf/";
-  /*  URL to talk back to Agent Controller*/
-  String CONTROLLER_URL = "agent.controller.url";
-  /**
-   * The location of pre-installed agent path.
-   * This can be also be dynamically computed based on Yarn installation of agent.
-   */
-  String PACKAGE_PATH = "agent.package.root";
-  /**
-   * The location of the script implementing the command.
-   */
-  String SCRIPT_PATH = "agent.script";
-  /**
-   * Execution home for the agent.
-   */
-  String APP_HOME = "app.home";
-  String APP_ROOT = "site.global.app_root";
-  String APP_CLIENT_ROOT = "client_root";
-  /**
-   * Runas user of the application
-   */
-  String RUNAS_USER = "site.global.app_user";
-  /**
-   * Name of the service.
-   */
-  String SERVICE_NAME = "app.name";
-  String ARG_LABEL = "--label";
-  String ARG_HOST = "--host";
-  String ARG_PORT = "--port";
-  String ARG_SECURED_PORT = "--secured_port";
-  String ARG_ZOOKEEPER_QUORUM = "--zk-quorum";
-  String ARG_ZOOKEEPER_REGISTRY_PATH = "--zk-reg-path";
-  String ARG_DEBUG = "--debug";
-  String AGENT_MAIN_SCRIPT_ROOT = "./infra/agent/slider-agent/";
-  String AGENT_JINJA2_ROOT = "./infra/agent/slider-agent/jinja2";
-  String AGENT_MAIN_SCRIPT = "agent/main.py";
-
-  String APP_DEF = "application.def";
-  String APP_DEF_ORIGINAL = "application.def.original";
-  String ADDON_PREFIX = "application.addon.";
-  String ADDONS = "application.addons";
-  String AGENT_VERSION = "agent.version";
-  String AGENT_CONF = "agent.conf";
-  String ADDON_FOR_ALL_COMPONENTS = "ALL";
-
-  String AGENT_INSTALL_DIR = "infra/agent";
-  String APP_DEFINITION_DIR = "app/definition";
-  String ADDON_DEFINITION_DIR = "addon/definition";
-  String AGENT_CONFIG_FILE = "infra/conf/agent.ini";
-  String AGENT_VERSION_FILE = "infra/version";
-
-  String PACKAGE_LIST = "package_list";
-  String WAIT_HEARTBEAT = "wait.heartbeat";
-  String PYTHON_EXE = "python";
-  String CREATE_DEF_ZK_NODE = "create.default.zookeeper.node";
-  String HEARTBEAT_MONITOR_INTERVAL = "heartbeat.monitor.interval";
-  String AGENT_INSTANCE_DEBUG_DATA = "agent.instance.debug.data";
-  String AGENT_OUT_FILE = "slider-agent.out";
-  String KEY_AGENT_TWO_WAY_SSL_ENABLED = "ssl.server.client.auth";
-  String INFRA_RUN_SECURITY_DIR = "infra/run/security/";
-  String CERT_FILE_LOCALIZATION_PATH = INFRA_RUN_SECURITY_DIR + "ca.crt";
-  String KEY_CONTAINER_LAUNCH_DELAY = "container.launch.delay.sec";
-  String TEST_RELAX_VERIFICATION = "test.relax.validation";
-
-  String DEFAULT_METAINFO_MAP_KEY = "DEFAULT_KEY";
-}
-
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
index 1d5d8a0..4773327 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
@@ -17,37 +17,37 @@
  */
 package org.apache.slider.providers.docker;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.providers.AbstractClientProvider;
-import org.apache.slider.providers.ProviderRole;
-import org.apache.slider.providers.ProviderUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.slider.util.RestApiErrorMessages;
 
-import java.util.Collections;
-import java.util.List;
+import java.io.IOException;
 
 public class DockerClientProvider extends AbstractClientProvider
     implements SliderKeys {
 
-  protected static final Logger log =
-      LoggerFactory.getLogger(DockerClientProvider.class);
-  private static final ProviderUtils providerUtils = new ProviderUtils(log);
-  protected static final String NAME = "docker";
-
-  public DockerClientProvider(Configuration conf) {
-    super(conf);
+  public DockerClientProvider() {
+    super();
   }
 
   @Override
-  public String getName() {
-    return NAME;
+  public void validateArtifact(Artifact artifact, FileSystem fileSystem) {
+    if (artifact == null) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+    }
+    if (StringUtils.isEmpty(artifact.getId())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+    }
   }
 
   @Override
-  public List<ProviderRole> getRoles() {
-    return Collections.emptyList();
+  protected void validateConfigFile(ConfigFile configFile, FileSystem
+      fileSystem) throws IOException {
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
index 0e1d288..7413ebb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
@@ -27,7 +27,4 @@ public interface DockerKeys {
 
   String DEFAULT_DOCKER_NETWORK = "bridge";
   Boolean DEFAULT_DOCKER_USE_PRIVILEGED = false;
-
-  String OUT_FILE = "stdout.txt";
-  String ERR_FILE = "stderr.txt";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderFactory.java
index 5d2592f..6977e41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderFactory.java
@@ -17,27 +17,36 @@
  */
 package org.apache.slider.providers.docker;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.SliderProviderFactory;
 
 public class DockerProviderFactory extends SliderProviderFactory {
+  private static final SliderProviderFactory FACTORY = new
+      DockerProviderFactory();
 
-  public DockerProviderFactory() {
+  private DockerProviderFactory() {
   }
 
-  public DockerProviderFactory(Configuration conf) {
-    super(conf);
+  private static class Client {
+    static final AbstractClientProvider PROVIDER = new DockerClientProvider();
+  }
+
+  private static class Server {
+    static final ProviderService PROVIDER = new DockerProviderService();
   }
 
   @Override
   public AbstractClientProvider createClientProvider() {
-    return new DockerClientProvider(getConf());
+    return Client.PROVIDER;
   }
 
   @Override
   public ProviderService createServerProvider() {
-    return new DockerProviderService();
+    return Server.PROVIDER;
+  }
+
+  public static SliderProviderFactory getInstance() {
+    return FACTORY;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index a48bf83..f35d4d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -17,159 +17,31 @@
  */
 package org.apache.slider.providers.docker;
 
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
-import org.apache.slider.api.resource.ContainerState;
-import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.launch.CommandLineBuilder;
 import org.apache.slider.core.launch.ContainerLauncher;
-import org.apache.slider.core.registry.docstore.PublishedConfiguration;
-import org.apache.slider.providers.ProviderRole;
-import org.apache.slider.providers.ProviderService;
-import org.apache.slider.providers.ProviderUtils;
-import org.apache.slider.server.appmaster.state.RoleInstance;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
-import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
+import org.apache.slider.providers.AbstractProviderService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.Map;
-import java.util.Map.Entry;
 
-import static org.apache.slider.util.ServiceApiUtil.$;
-
-public class DockerProviderService extends AbstractService
-    implements ProviderService, DockerKeys, SliderKeys {
+public class DockerProviderService extends AbstractProviderService
+    implements DockerKeys {
 
   protected static final Logger log =
       LoggerFactory.getLogger(DockerProviderService.class);
-  private static final ProviderUtils providerUtils = new ProviderUtils(log);
-  private static final String QUICK_LINKS = "quicklinks";
-  protected StateAccessForProviders amState;
-  protected YarnRegistryViewForProviders yarnRegistry;
-  private ServiceTimelinePublisher serviceTimelinePublisher;
 
   protected DockerProviderService() {
-    super("DockerProviderService");
-  }
-
-  @Override
-  public void setAMState(StateAccessForProviders stateAccessor) {
-    this.amState = stateAccessor;
-  }
-
-  @Override
-  public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
-    this.yarnRegistry = yarnRegistry;
+    super(DockerProviderService.class.getSimpleName());
   }
 
-
-  public void buildContainerLaunchContext(ContainerLauncher launcher,
-      Application application, Container container, ProviderRole providerRole,
-      SliderFileSystem fileSystem, RoleInstance roleInstance)
-      throws IOException, SliderException {
-    Component component = providerRole.component;
+  public void processArtifact(ContainerLauncher launcher, Component
+      component, SliderFileSystem fileSystem) throws IOException {
     launcher.setYarnDockerMode(true);
     launcher.setDockerImage(component.getArtifact().getId());
     launcher.setDockerNetwork(component.getConfiguration()
         .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK));
     launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer());
-
-    // Generate tokens (key-value pair) for config substitution.
-    // Get pre-defined tokens
-    Map<String, String> globalTokens = amState.getGlobalSubstitutionTokens();
-    Map<String, String> tokensForSubstitution = providerUtils
-        .initCompTokensForSubstitute(roleInstance);
-    tokensForSubstitution.putAll(globalTokens);
-    // Set the environment variables in launcher
-    launcher.putEnv(SliderUtils
-        .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
-    launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
-    launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
-    if (System.getenv(HADOOP_USER_NAME) != null) {
-      launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
-    }
-    launcher.setEnv("LANG", "en_US.UTF-8");
-    launcher.setEnv("LC_ALL", "en_US.UTF-8");
-    launcher.setEnv("LANGUAGE", "en_US.UTF-8");
-
-    for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
-      tokensForSubstitution.put($(entry.getKey()), entry.getValue());
-    }
-    providerUtils.addComponentHostTokens(tokensForSubstitution, amState);
-
-    // create config file on hdfs and add local resource
-    providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
-        component, tokensForSubstitution, roleInstance, amState);
-
-    // substitute launch command
-    String launchCommand = ProviderUtils
-        .substituteStrWithTokens(component.getLaunchCommand(),
-            tokensForSubstitution);
-    CommandLineBuilder operation = new CommandLineBuilder();
-    operation.add(launchCommand);
-    operation.addOutAndErrFiles(OUT_FILE, ERR_FILE);
-    launcher.addCommand(operation.build());
-
-    // publish exports
-    providerUtils
-        .substituteMapWithTokens(application.getQuicklinks(), tokensForSubstitution);
-    PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
-        application.getQuicklinks().entrySet());
-    amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);
-    if (serviceTimelinePublisher != null) {
-      serviceTimelinePublisher.serviceAttemptUpdated(application);
-    }
-  }
-
-  public boolean processContainerStatus(ContainerId containerId,
-      ContainerStatus status) {
-    log.debug("Handling container status: {}", status);
-    if (SliderUtils.isEmpty(status.getIPs()) ||
-        SliderUtils.isUnset(status.getHost())) {
-      return true;
-    }
-    RoleInstance instance = amState.getOwnedContainer(containerId);
-    if (instance == null) {
-      // container is completed?
-      return false;
-    }
-
-    try {
-      providerUtils.updateServiceRecord(amState, yarnRegistry,
-          containerId.toString(), instance.role, status.getIPs(), status.getHost());
-    } catch (IOException e) {
-      // could not write service record to ZK, log and retry
-      log.warn("Error updating container {} service record in registry, " +
-          "retrying", containerId, e);
-      return true;
-    }
-    // TODO publish ip and host
-    org.apache.slider.api.resource.Container container =
-        instance.providerRole.component.getContainer(containerId.toString());
-    if (container != null) {
-      container.setIp(StringUtils.join(",", status.getIPs()));
-      container.setHostname(status.getHost());
-      container.setState(ContainerState.READY);
-    } else {
-      log.warn(containerId + " not found in Application!");
-    }
-    return false;
-  }
-
-  @Override
-  public void setServiceTimelinePublisher(ServiceTimelinePublisher publisher) {
-    this.serviceTimelinePublisher = publisher;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballClientProvider.java
new file mode 100644
index 0000000..3e020cf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballClientProvider.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.tarball;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.providers.AbstractClientProvider;
+import org.apache.slider.util.RestApiErrorMessages;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+
+public class TarballClientProvider extends AbstractClientProvider
+    implements SliderKeys {
+
+  public TarballClientProvider() {
+  }
+
+  @Override
+  public void validateArtifact(Artifact artifact, FileSystem fs)
+      throws IOException {
+    if (artifact == null) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+    }
+    if (StringUtils.isEmpty(artifact.getId())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+    }
+    Path p = new Path(artifact.getId());
+    if (!fs.exists(p)) {
+      throw new IllegalArgumentException( "Artifact tarball does not exist "
+          + artifact.getId());
+    }
+  }
+
+  @Override
+  protected void validateConfigFile(ConfigFile configFile, FileSystem
+      fileSystem) throws IOException {
+    // validate dest_file is not absolute
+    if (Paths.get(configFile.getDestFile()).isAbsolute()) {
+      throw new IllegalArgumentException(
+          "Dest_file must not be absolute path: " + configFile.getDestFile());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderFactory.java
new file mode 100644
index 0000000..d9b7450
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.tarball;
+
+import org.apache.slider.providers.AbstractClientProvider;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.providers.SliderProviderFactory;
+
+public class TarballProviderFactory extends SliderProviderFactory {
+  private static final SliderProviderFactory FACTORY = new
+      TarballProviderFactory();
+
+  private TarballProviderFactory() {
+  }
+
+  private static class Client {
+    static final AbstractClientProvider PROVIDER = new TarballClientProvider();
+  }
+
+  private static class Server {
+    static final ProviderService PROVIDER = new TarballProviderService();
+  }
+
+  @Override
+  public AbstractClientProvider createClientProvider() {
+    return Client.PROVIDER;
+  }
+
+  @Override
+  public ProviderService createServerProvider() {
+    return Server.PROVIDER;
+  }
+
+  public static SliderProviderFactory getInstance() {
+    return FACTORY;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
new file mode 100644
index 0000000..65a55f0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.tarball;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.providers.AbstractProviderService;
+
+import java.io.IOException;
+
+public class TarballProviderService extends AbstractProviderService {
+
+  protected TarballProviderService() {
+    super(TarballProviderService.class.getSimpleName());
+  }
+
+  @Override
+  public void processArtifact(ContainerLauncher launcher, Component
+      component, SliderFileSystem fileSystem) throws IOException {
+    Path artifact =  new Path(component.getArtifact().getId());
+    if (!fileSystem.isFile(artifact)) {
+      throw new IOException("Package doesn't exist as a resource: " +
+          artifact.toString());
+    }
+    log.info("Adding resource {}", artifact.toString());
+    LocalResourceType type = LocalResourceType.ARCHIVE;
+    LocalResource packageResource = fileSystem.createAmResource(
+        artifact, type);
+    launcher.addLocalResource(APP_INSTALL_DIR, packageResource);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index 7c096c7..aa84940 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -27,7 +27,7 @@ import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
-import org.apache.slider.providers.agent.AgentKeys;
+import org.apache.slider.providers.SliderProviderFactory;
 import org.apache.slider.server.appmaster.actions.ActionStartContainer;
 import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.state.ContainerAssignment;
@@ -43,6 +43,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.slider.common.SliderKeys.KEY_CONTAINER_LAUNCH_DELAY;
+
 /**
  * A service for launching containers
  */
@@ -60,11 +62,6 @@ public class RoleLaunchService
   private final QueueAccess actionQueue;
 
   /**
-   * Provider building up the command
-   */
-  private final ProviderService provider;
-  
-  /**
    * Filesystem to use for the launch
    */
   private final SliderFileSystem fs;
@@ -75,16 +72,14 @@ public class RoleLaunchService
   /**
    * Construct an instance of the launcher
    * @param queueAccess
-   * @param provider the provider
    * @param fs filesystem
    * @param envVars environment variables
    */
-  public RoleLaunchService(QueueAccess queueAccess, ProviderService provider,
-      SliderFileSystem fs, Map<String, String> envVars) {
+  public RoleLaunchService(QueueAccess queueAccess, SliderFileSystem fs,
+      Map<String, String> envVars) {
     super(ROLE_LAUNCH_SERVICE);
     this.actionQueue = queueAccess;
     this.fs = fs;
-    this.provider = provider;
     this.envVars = envVars;
   }
 
@@ -167,11 +162,13 @@ public class RoleLaunchService
         instance.roleId = role.id;
         instance.environment = envDescription;
 
+        ProviderService provider = SliderProviderFactory.getProviderService(
+            role.component.getArtifact());
         provider.buildContainerLaunchContext(containerLauncher, application,
             container, role, fs, instance);
 
         long delay = role.component.getConfiguration()
-            .getPropertyLong(AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
+            .getPropertyLong(KEY_CONTAINER_LAUNCH_DELAY, 0);
         long maxDelay = getConfig()
             .getLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
                 YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 02c9198..84dde08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -64,7 +64,6 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
@@ -342,7 +341,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * ProviderService of this cluster
    */
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
-  private ProviderService providerService;
+  private List<ProviderService> providers = new ArrayList<>();
 
   /**
    * The YARN registry service
@@ -523,8 +522,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   public Configuration bindArgs(Configuration config, String... args) throws Exception {
     // let the superclass process it
     Configuration superConf = super.bindArgs(config, args);
-    // add the slider XML config
-    ConfigHelper.injectSliderXMLResource();
 
     //yarn-ify
     YarnConfiguration yarnConfiguration = new YarnConfiguration(
@@ -603,12 +600,15 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     // obtain security state
     // set the global security flag for the instance definition
 
-    //get our provider
-    SliderProviderFactory factory =
-      SliderProviderFactory.createSliderProviderFactory("docker");
-    providerService = factory.createServerProvider();
-    // init the provider BUT DO NOT START IT YET
-    initAndAddService(providerService);
+    // initialize our providers
+    for (Component component : application.getComponents()) {
+      SliderProviderFactory factory = SliderProviderFactory
+          .createSliderProviderFactory(component.getArtifact());
+      ProviderService providerService = factory.createServerProvider();
+      // init the provider BUT DO NOT START IT YET
+      initAndAddService(providerService);
+      providers.add(providerService);
+    }
 
     InetSocketAddress rmSchedulerAddress = SliderUtils.getRmSchedulerAddress(serviceConf);
     log.info("RM is at {}", rmSchedulerAddress);
@@ -667,7 +667,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         serviceTimelinePublisher.init(getConfig());
         serviceTimelinePublisher.start();
 
-        providerService.setServiceTimelinePublisher(serviceTimelinePublisher);
+        for (ProviderService providerService : providers) {
+          providerService.setServiceTimelinePublisher(serviceTimelinePublisher);
+        }
         appState.setServiceTimelinePublisher(serviceTimelinePublisher);
         log.info("ServiceTimelinePublisher started.");
       }
@@ -707,7 +709,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       WebAppApiImpl webAppApi =
           new WebAppApiImpl(
               stateForProviders,
-              providerService, registryOperations,
+              registryOperations,
               metricsAndMonitoring,
               actionQueues);
       initAMFilterOptions(serviceConf);
@@ -843,13 +845,14 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     //launcher service
     launchService = new RoleLaunchService(actionQueues,
-                                          providerService,
                                           fs, envVars);
 
     deployChildService(launchService);
 
     //Give the provider access to the state, and AM
-    providerService.setAMState(stateForProviders);
+    for (ProviderService providerService : providers) {
+      providerService.setAMState(stateForProviders);
+    }
 
     // chaos monkey
     maybeStartMonkey();
@@ -1119,7 +1122,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         SliderKeys.APP_TYPE,
         instanceName,
         appAttemptID);
-    providerService.bindToYarnRegistry(yarnRegistryOperations);
+    for (ProviderService providerService : providers) {
+      providerService.bindToYarnRegistry(yarnRegistryOperations);
+    }
 
     // Yarn registry
     ServiceRecord serviceRecord = new ServiceRecord();
@@ -1859,7 +1864,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   protected synchronized void launchProviderService()
       throws IOException, SliderException {
     // didn't start, so don't register
-    providerService.start();
+    for (ProviderService providerService : providers) {
+      providerService.start();
+    }
     // and send the started event ourselves
     eventCallbackEvent(null);
   }
@@ -1959,19 +1966,23 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       ContainerStatus containerStatus) {
     LOG_YARN.debug("Container Status: id={}, status={}", containerId,
         containerStatus);
+    RoleInstance cinfo = appState.getOwnedContainer(containerId);
+    if (cinfo == null) {
+      LOG_YARN.error("Owned container not found for {}", containerId);
+      return;
+    }
+    ProviderService providerService = SliderProviderFactory
+        .getProviderService(cinfo.providerRole.component.getArtifact());
     if (providerService.processContainerStatus(containerId, containerStatus)) {
       try {
         Thread.sleep(1000);
       } catch (InterruptedException e) {
       }
-      RoleInstance cinfo = appState.getOwnedContainer(containerId);
-      if (cinfo != null) {
-        LOG_YARN.info("Re-requesting status for role {}, {}",
-            cinfo.role, containerId);
-        //trigger another async container status
-        nmClientAsync.getContainerStatusAsync(containerId,
-            cinfo.container.getNodeId());
-      }
+      LOG_YARN.info("Re-requesting status for role {}, {}",
+          cinfo.role, containerId);
+      //trigger another async container status
+      nmClientAsync.getContainerStatusAsync(containerId,
+          cinfo.container.getNodeId());
     } else if (timelineServiceEnabled) {
       RoleInstance instance = appState.getOwnedContainer(containerId);
       if (instance != null) {
@@ -1997,11 +2008,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     LOG_YARN.warn("Failed to stop Container {}", containerId);
   }
 
-
-  public ProviderService getProviderService() {
-    return providerService;
-  }
-
   /**
    * Queue an action for immediate execution in the executor thread
    * @param action action to execute

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
index 094726d..02f3f0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
@@ -17,7 +17,6 @@
 package org.apache.slider.server.appmaster.web;
 
 import org.apache.hadoop.registry.client.api.RegistryOperations;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.state.AppState;
@@ -34,11 +33,6 @@ public interface WebAppApi {
   StateAccessForProviders getAppState();
   
   /**
-   * The {@link ProviderService} for the current cluster
-   */
-  ProviderService getProviderService();
-  
-  /**
    * Registry operations accessor
    * @return registry access
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
index fd9381c..f88f501 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
@@ -17,7 +17,6 @@
 package org.apache.slider.server.appmaster.web;
 
 import org.apache.hadoop.registry.client.api.RegistryOperations;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
@@ -33,21 +32,18 @@ public class WebAppApiImpl implements WebAppApi {
   private static final Logger log = LoggerFactory.getLogger(WebAppApiImpl.class);
 
   protected final StateAccessForProviders appState;
-  protected final ProviderService provider;
   private final RegistryOperations registryOperations;
   private final MetricsAndMonitoring metricsAndMonitoring;
   private final QueueAccess queues;
 
   public WebAppApiImpl(StateAccessForProviders appState,
-      ProviderService provider, RegistryOperations registryOperations,
+      RegistryOperations registryOperations,
       MetricsAndMonitoring metricsAndMonitoring, QueueAccess queues) {
     checkNotNull(appState);
-    checkNotNull(provider);
     this.queues = queues;
 
     this.registryOperations = registryOperations;
     this.appState = appState;
-    this.provider = provider;
     this.metricsAndMonitoring = metricsAndMonitoring;
   }
 
@@ -57,11 +53,6 @@ public class WebAppApiImpl implements WebAppApi {
   }
 
   @Override
-  public ProviderService getProviderService() {
-    return provider;
-  }
-
-  @Override
   public RegistryOperations getRegistryOperations() {
     return registryOperations;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
index 82d7c8f..5f44bda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/SliderHamletBlock.java
@@ -19,7 +19,6 @@
 package org.apache.slider.server.appmaster.web.view;
 
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.apache.slider.server.appmaster.web.rest.RestPaths;
@@ -33,12 +32,10 @@ import static org.apache.slider.server.appmaster.web.rest.RestPaths.SLIDER_PATH_
 public abstract class SliderHamletBlock extends HtmlBlock  {
 
   protected final StateAccessForProviders appState;
-  protected final ProviderService providerService;
   protected final RestPaths restPaths = new RestPaths();
   
   public SliderHamletBlock(WebAppApi slider) {
     this.appState = slider.getAppState();
-    this.providerService = slider.getProviderService();
   }
 
   protected String rootPath(String absolutePath) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
deleted file mode 100644
index 96bfe0f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-
-<configuration>
-  <property>
-    <name>slider.config.loaded</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>slider.provider.docker</name>
-    <value>org.apache.slider.providers.docker.DockerProviderFactory</value>
-  </property>
-</configuration>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 6f54959..e891a27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -18,12 +18,12 @@
 
 package org.apache.slider.server.appmaster.state;
 
-import com.codahale.metrics.Metric;
-import com.codahale.metrics.MetricRegistry;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -31,42 +31,35 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.slider.api.ClusterDescription;
-import org.apache.slider.api.ClusterDescriptionKeys;
-import org.apache.slider.api.ClusterDescriptionOperations;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.InternalKeys;
-import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ApplicationState;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.RoleStatistics;
 import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.tools.ConfigHelper;
 import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadClusterStateException;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.ErrorStrings;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.exceptions.SliderInternalStateException;
 import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
-import org.apache.slider.core.persist.AggregateConfSerDeser;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
 import org.apache.slider.providers.PlacementPolicy;
 import org.apache.slider.providers.ProviderRole;
-import org.apache.slider.server.appmaster.management.LongGauge;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.management.MetricsConstants;
+import org.apache.slider.server.appmaster.metrics.SliderMetrics;
 import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
 import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
 import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
@@ -77,7 +70,6 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -89,12 +81,10 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.apache.slider.api.ResourceKeys.*;
-import static org.apache.slider.api.RoleKeys.*;
 import static org.apache.slider.api.StateValues.*;
-import static org.apache.slider.providers.docker.DockerKeys.DEFAULT_DOCKER_USE_PRIVILEGED;
-import static org.apache.slider.providers.docker.DockerKeys.DOCKER_IMAGE;
-import static org.apache.slider.providers.docker.DockerKeys.DOCKER_USE_PRIVILEGED;
+import static org.apache.slider.api.resource.ApplicationState.STARTED;
 
 /**
  * The model of all the ongoing state of a Slider AM.
@@ -117,53 +107,8 @@ public class AppState {
    */
   private boolean applicationLive = false;
 
-  /**
-   * The definition of the instance. Flexing updates the resources section
-   * This is used as a synchronization point on activities that update
-   * the CD, and also to update some of the structures that
-   * feed in to the CD
-   */
-  private AggregateConf instanceDefinition;
-
-  /**
-   * Time the instance definition snapshots were created
-   */
-  private long snapshotTime;
-
-  /**
-   * Snapshot of the instance definition. This is fully
-   * resolved.
-   */
-  private AggregateConf instanceDefinitionSnapshot;
+  private Application app;
 
-  /**
-   * Snapshot of the raw instance definition; unresolved and
-   * without any patch of an AM into it.
-   */
-  private AggregateConf unresolvedInstanceDefinition;
-
-  /**
-   * snapshot of resources as of last update time
-   */
-  private ConfTreeOperations resourcesSnapshot;
-  private ConfTreeOperations appConfSnapshot;
-  private ConfTreeOperations internalsSnapshot;
-
-  /**
-   * This is the status, the live model
-   */
-  private ClusterDescription clusterStatus = new ClusterDescription();
-
-  /**
-   * Metadata provided by the AM for use in filling in status requests
-   */
-  private Map<String, String> applicationInfo;
-
-  /**
-   * Client properties created via the provider -static for the life
-   * of the application
-   */
-  private Map<String, String> clientProperties = new HashMap<>();
 
   /**
    * This is a template of the cluster status
@@ -180,11 +125,6 @@ public class AppState {
     new ConcurrentSkipListMap<>();
 
   /**
-   * The master node.
-   */
-  private RoleInstance appMasterNode;
-
-  /**
    * Hash map of the containers we have. This includes things that have
    * been allocated but are not live; it is a superset of the live list
    */
@@ -198,37 +138,6 @@ public class AppState {
    */
   private final ConcurrentMap<ContainerId, Container> containersBeingReleased =
     new ConcurrentHashMap<>();
-  
-  /**
-   * Counter for completed containers ( complete denotes successful or failed )
-   */
-  private final LongGauge completedContainerCount = new LongGauge();
-
-  /**
-   *   Count of failed containers
-   */
-  private final LongGauge failedContainerCount = new LongGauge();
-
-  /**
-   * # of started containers
-   */
-  private final LongGauge startedContainers = new LongGauge();
-
-  /**
-   * # of containers that failed to start 
-   */
-  private final LongGauge startFailedContainerCount = new LongGauge();
-
-  /**
-   * Track the number of surplus containers received and discarded
-   */
-  private final LongGauge surplusContainers = new LongGauge();
-
-  /**
-   * Track the number of requested containers.
-   * Important: this does not include AA requests which are yet to be issued.
-   */
-  private final LongGauge outstandingContainerRequests = new LongGauge();
 
   /**
    * Map of requested nodes. This records the command used to start it,
@@ -256,7 +165,7 @@ public class AppState {
    * Nodes that came assigned to a role above that
    * which were asked for -this appears to happen
    */
-  private final Set<ContainerId> surplusNodes = new HashSet<>();
+  private final Set<ContainerId> surplusContainers = new HashSet<>();
 
   /**
    * Map of containerID to cluster nodes, for status reports.
@@ -269,7 +178,6 @@ public class AppState {
   private final AtomicInteger completionOfUnknownContainerEvent =
     new AtomicInteger();
 
-
   /**
    * limits of container core numbers in this queue
    */
@@ -298,6 +206,7 @@ public class AppState {
   private Resource minResource;
   private Resource maxResource;
 
+  private SliderMetrics appMetrics;
   /**
    * Create an instance
    * @param recordFactory factory for YARN records
@@ -309,60 +218,6 @@ public class AppState {
     Preconditions.checkArgument(metricsAndMonitoring != null, "null metricsAndMonitoring");
     this.recordFactory = recordFactory;
     this.metricsAndMonitoring = metricsAndMonitoring;
-
-    // register any metrics
-    register(MetricsConstants.CONTAINERS_OUTSTANDING_REQUESTS, outstandingContainerRequests);
-    register(MetricsConstants.CONTAINERS_SURPLUS, surplusContainers);
-    register(MetricsConstants.CONTAINERS_STARTED, startedContainers);
-    register(MetricsConstants.CONTAINERS_COMPLETED, completedContainerCount);
-    register(MetricsConstants.CONTAINERS_FAILED, failedContainerCount);
-    register(MetricsConstants.CONTAINERS_START_FAILED, startFailedContainerCount);
-  }
-
-  private void register(String name, Metric counter) {
-    this.metricsAndMonitoring.getMetrics().register(
-        MetricRegistry.name(AppState.class, name), counter);
-  }
-
-  public long getFailedCountainerCount() {
-    return failedContainerCount.getCount();
-  }
-
-  /**
-   * Increment the count
-   */
-  public void incFailedCountainerCount() {
-    failedContainerCount.inc();
-  }
-
-  public long getStartFailedCountainerCount() {
-    return startFailedContainerCount.getCount();
-  }
-
-  /**
-   * Increment the count and return the new value
-   */
-  public void incStartedCountainerCount() {
-    startedContainers.inc();
-  }
-
-  public long getStartedCountainerCount() {
-    return startedContainers.getCount();
-  }
-
-  /**
-   * Increment the count and return the new value
-   */
-  public void incStartFailedCountainerCount() {
-    startFailedContainerCount.inc();
-  }
-
-  public AtomicInteger getCompletionOfNodeNotInLiveListEvent() {
-    return completionOfNodeNotInLiveListEvent;
-  }
-
-  public AtomicInteger getCompletionOfUnknownContainerEvent() {
-    return completionOfUnknownContainerEvent;
   }
 
 
@@ -370,13 +225,7 @@ public class AppState {
     return roleStatusMap;
   }
   
-  protected Map<String, ProviderRole> getRoleMap() {
-    return roles;
-  }
 
-  public Map<Integer, ProviderRole> getRolePriorityMap() {
-    return rolePriorityMap;
-  }
 
   private Map<ContainerId, RoleInstance> getStartingContainers() {
     return startingContainers;
@@ -396,47 +245,13 @@ public class AppState {
 
   /**
    * Get the current view of the cluster status.
-   * <p>
-   *   Calls to {@link #refreshClusterStatus()} trigger a
-   *   refresh of this field.
-   * <p>
    * This is read-only
    * to the extent that changes here do not trigger updates in the
    * application state. 
    * @return the cluster status
    */
-  public synchronized ClusterDescription getClusterStatus() {
-    return clusterStatus;
-  }
-
-  @VisibleForTesting
-  protected synchronized void setClusterStatus(ClusterDescription clusterDesc) {
-    this.clusterStatus = clusterDesc;
-  }
-
-  /**
-   * Set the instance definition -this also builds the (now obsolete)
-   * cluster specification from it.
-   * 
-   * Important: this is for early binding and must not be used after the build
-   * operation is complete. 
-   * @param definition initial definition
-   * @throws BadConfigException
-   */
-  public synchronized void setInitialInstanceDefinition(AggregateConf definition)
-      throws BadConfigException, IOException {
-    log.debug("Setting initial instance definition");
-    // snapshot the definition
-    AggregateConfSerDeser serDeser = new AggregateConfSerDeser();
-
-    unresolvedInstanceDefinition = serDeser.fromInstance(definition);
-    
-    this.instanceDefinition = serDeser.fromInstance(definition);
-    onInstanceDefinitionUpdated();
-  }
-
-  public synchronized AggregateConf getInstanceDefinition() {
-    return instanceDefinition;
+  public synchronized Application getClusterStatus() {
+    return app;
   }
 
   /**
@@ -475,58 +290,27 @@ public class AppState {
     maxResource = recordFactory.newResource(containerMaxMemory, containerMaxCores);
   }
 
-  public ConfTreeOperations getResourcesSnapshot() {
-    return resourcesSnapshot;
-  }
-
-  public ConfTreeOperations getAppConfSnapshot() {
-    return appConfSnapshot;
-  }
-
-  public ConfTreeOperations getInternalsSnapshot() {
-    return internalsSnapshot;
-  }
-
   public boolean isApplicationLive() {
     return applicationLive;
   }
 
-  public long getSnapshotTime() {
-    return snapshotTime;
-  }
-
-  public synchronized AggregateConf getInstanceDefinitionSnapshot() {
-    return instanceDefinitionSnapshot;
-  }
-
-  public AggregateConf getUnresolvedInstanceDefinition() {
-    return unresolvedInstanceDefinition;
-  }
 
   public synchronized void buildInstance(AppStateBindingInfo binding)
       throws BadClusterStateException, BadConfigException, IOException {
     binding.validate();
 
     log.debug("Building application state");
-    publishedProviderConf = binding.publishedProviderConf;
-    applicationInfo = binding.applicationInfo != null ? binding.applicationInfo
-                        : new HashMap<String, String>();
-
-    clientProperties = new HashMap<>();
     containerReleaseSelector = binding.releaseSelector;
 
-
-    Set<String> confKeys = ConfigHelper.sortedConfigKeys(publishedProviderConf);
-
-    //  Add the -site configuration properties
-    for (String key : confKeys) {
-      String val = publishedProviderConf.get(key);
-      clientProperties.put(key, val);
-    }
-
     // set the cluster specification (once its dependency the client properties
     // is out the way
-    setInitialInstanceDefinition(binding.instanceDefinition);
+    this.app = binding.application;
+    appMetrics = SliderMetrics.register(app.getName(),
+        "Metrics for service");
+    appMetrics
+        .tag("type", "Metrics type [component or service]", "service");
+    appMetrics
+        .tag("appId", "Application id for service", app.getId());
 
     //build the initial role list
     List<ProviderRole> roleList = new ArrayList<>(binding.roles);
@@ -534,51 +318,40 @@ public class AppState {
       buildRole(providerRole);
     }
 
-    ConfTreeOperations resources = instanceDefinition.getResourceOperations();
-
-    Set<String> roleNames = resources.getComponentNames();
-    for (String name : roleNames) {
+    int priority = 1;
+    for (Component component : app.getComponents()) {
+      String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
       }
-      if (hasUniqueNames(resources, name)) {
-        log.info("Skipping group {}", name);
+      if (component.getUniqueComponentSupport()) {
+        log.info("Skipping group " + name + ", as it's unique component");
         continue;
       }
-      // this is a new value
-      log.info("Adding role {}", name);
-      MapOperations resComponent = resources.getComponent(name);
-      ProviderRole dynamicRole = createDynamicProviderRole(name, resComponent);
+      log.info("Adding component: " + name);
+      ProviderRole dynamicRole =
+          createComponent(name, name, component, priority++);
       buildRole(dynamicRole);
       roleList.add(dynamicRole);
     }
     //then pick up the requirements
     buildRoleRequirementsFromResources();
 
-    //set the livespan
-    MapOperations globalResOpts = instanceDefinition.getResourceOperations().getGlobalOptions();
-
-    startTimeThreshold = globalResOpts.getOptionInt(
-        InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
-        InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
-
-    failureThreshold = globalResOpts.getOptionInt(
-        CONTAINER_FAILURE_THRESHOLD,
+    org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
+    startTimeThreshold =
+        conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
+            InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
+    failureThreshold = (int) conf.getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
         DEFAULT_CONTAINER_FAILURE_THRESHOLD);
-    nodeFailureThreshold = globalResOpts.getOptionInt(
-        NODE_FAILURE_THRESHOLD,
+    nodeFailureThreshold = (int) conf.getPropertyLong(NODE_FAILURE_THRESHOLD,
         DEFAULT_NODE_FAILURE_THRESHOLD);
-    initClusterStatus();
-
 
     // set up the role history
     roleHistory = new RoleHistory(roleStatusMap.values(), recordFactory);
-    roleHistory.register(metricsAndMonitoring);
     roleHistory.onStart(binding.fs, binding.historyPath);
     // trigger first node update
     roleHistory.onNodesUpdated(binding.nodeReports);
 
-
     //rebuild any live containers
     rebuildModelFromRestart(binding.liveContainers);
 
@@ -586,180 +359,57 @@ public class AppState {
     logServerURL = binding.serviceConfig.get(YarnConfiguration.YARN_LOG_SERVER_URL, "");
     //mark as live
     applicationLive = true;
-  }
-
-  public void initClusterStatus() {
-    //copy into cluster status. 
-    ClusterDescription status = ClusterDescription.copy(clusterStatusTemplate);
-    status.state = STATE_CREATED;
-    MapOperations infoOps = new MapOperations("info", status.info);
-    infoOps.mergeWithoutOverwrite(applicationInfo);
-    SliderUtils.addBuildInfo(infoOps, "status");
-
-    long now = now();
-    status.setInfoTime(StatusKeys.INFO_LIVE_TIME_HUMAN,
-                              StatusKeys.INFO_LIVE_TIME_MILLIS,
-                              now);
-    SliderUtils.setInfoTime(infoOps,
-        StatusKeys.INFO_LIVE_TIME_HUMAN,
-        StatusKeys.INFO_LIVE_TIME_MILLIS,
-        now);
-    if (0 == status.createTime) {
-      status.createTime = now;
-      SliderUtils.setInfoTime(infoOps,
-          StatusKeys.INFO_CREATE_TIME_HUMAN,
-          StatusKeys.INFO_CREATE_TIME_MILLIS,
-          now);
-    }
-    status.state = STATE_LIVE;
-
-      //set the app state to this status
-    setClusterStatus(status);
-  }
-
-  /**
-   * Build a dynamic provider role
-   * @param name name of role
-   * @return a new provider role
-   * @throws BadConfigException bad configuration
-   */
-  public ProviderRole createDynamicProviderRole(String name, MapOperations component)
-      throws BadConfigException {
-    return createDynamicProviderRole(name, name, component);
-  }
-
-  /**
-   * Build a dynamic provider role
-   * @param name name of role
-   * @param group group of role
-   * @return a new provider role
-   * @throws BadConfigException bad configuration
-   */
-  public ProviderRole createDynamicProviderRole(String name, String group, MapOperations component)
-      throws BadConfigException {
-    String priOpt = component.getMandatoryOption(COMPONENT_PRIORITY);
-    int priority = SliderUtils.parseAndValidate(
-        "value of " + name + " " + COMPONENT_PRIORITY, priOpt, 0, 1, -1);
-
-    String placementOpt = component.getOption(COMPONENT_PLACEMENT_POLICY,
-        Integer.toString(PlacementPolicy.DEFAULT));
-
-    int placement = SliderUtils.parseAndValidate(
-        "value of " + name + " " + COMPONENT_PLACEMENT_POLICY, placementOpt, 0, 0, -1);
-
-    int placementTimeout = component.getOptionInt(PLACEMENT_ESCALATE_DELAY,
-            DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS);
-
-    ProviderRole newRole = new ProviderRole(name,
-        group,
-        priority,
-        placement,
-        getNodeFailureThresholdForRole(group),
-        placementTimeout,
-        component.getOption(YARN_LABEL_EXPRESSION, DEF_YARN_LABEL_EXPRESSION));
-    log.info("New {} ", newRole);
+    app.setState(STARTED);
+  }
+
+  //TODO WHY do we need to create the component for AM ?
+  public ProviderRole createComponent(String name, String group,
+      Component component, int priority) throws BadConfigException {
+
+    org.apache.slider.api.resource.Configuration conf =
+        component.getConfiguration();
+    long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY,
+        DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS);
+    long placementPolicy = conf.getPropertyLong(COMPONENT_PLACEMENT_POLICY,
+        PlacementPolicy.DEFAULT);
+    int threshold = (int) conf
+        .getPropertyLong(NODE_FAILURE_THRESHOLD, nodeFailureThreshold);
+    ProviderRole newRole =
+        new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
+            placementTimeout, "", component);
+
+    log.info("Created a new role " + newRole);
     return newRole;
   }
 
-  /**
-   * Actions to perform when an instance definition is updated
-   * Currently: 
-   * <ol>
-   *   <li>
-   *     resolve the configuration
-   *   </li>
-   *   <li>
-   *     update the cluster spec derivative
-   *   </li>
-   * </ol>
-   *  
-   * @throws BadConfigException
-   */
-  private synchronized void onInstanceDefinitionUpdated()
-      throws BadConfigException, IOException {
-
-    log.debug("Instance definition updated");
-    //note the time 
-    snapshotTime = now();
-
-    for (String component : instanceDefinition.getResourceOperations().getComponentNames()) {
-      instanceDefinition.getAppConfOperations().getOrAddComponent(component);
-    }
-
-    // resolve references if not already done
-    instanceDefinition.resolve();
-
-    // force in the AM desired state values
-    ConfTreeOperations resources = instanceDefinition.getResourceOperations();
-
-    if (resources.getComponent(SliderKeys.COMPONENT_AM) != null) {
-      resources.setComponentOpt(
-          SliderKeys.COMPONENT_AM, COMPONENT_INSTANCES, "1");
-    }
-
-
-    //snapshot all three sectons
-    resourcesSnapshot = ConfTreeOperations.fromInstance(instanceDefinition.getResources());
-    appConfSnapshot = ConfTreeOperations.fromInstance(instanceDefinition.getAppConf());
-    internalsSnapshot = ConfTreeOperations.fromInstance(instanceDefinition.getInternal());
-    //build a new aggregate from the snapshots
-    instanceDefinitionSnapshot = new AggregateConf(resourcesSnapshot.confTree,
-                                                   appConfSnapshot.confTree,
-                                                   internalsSnapshot.confTree);
-    instanceDefinitionSnapshot.setName(instanceDefinition.getName());
-
-    clusterStatusTemplate = ClusterDescriptionOperations.buildFromInstanceDefinition(
-          instanceDefinition);
-
-    // Add the -site configuration properties
-    for (Map.Entry<String, String> prop : clientProperties.entrySet()) {
-      clusterStatusTemplate.clientProperties.put(prop.getKey(), prop.getValue());
+  public synchronized void updateComponents(
+      Messages.FlexComponentRequestProto requestProto)
+      throws BadConfigException {
+    for (Component component : app.getComponents()) {
+      if (component.getName().equals(requestProto.getName())) {
+        component
+            .setNumberOfContainers((long) requestProto.getNumberOfContainers());
+      }
     }
-
-  }
-
-  /**
-   * The resource configuration is updated -review and update state.
-   * @param resources updated resources specification
-   * @return a list of any dynamically added provider roles
-   * (purely for testing purposes)
-   */
-  @VisibleForTesting
-  public synchronized List<ProviderRole> updateResourceDefinitions(ConfTree resources)
-      throws BadConfigException, IOException {
-    log.debug("Updating resources to {}", resources);
-    // snapshot the (possibly unresolved) values
-    ConfTreeSerDeser serDeser = new ConfTreeSerDeser();
-    unresolvedInstanceDefinition.setResources(
-        serDeser.fromInstance(resources));
-    // assign another copy under the instance definition for resolving
-    // and then driving application size
-    instanceDefinition.setResources(serDeser.fromInstance(resources));
-    onInstanceDefinitionUpdated();
-
-    // propagate the role table
-    Map<String, Map<String, String>> updated = resources.components;
-    getClusterStatus().roles = SliderUtils.deepClone(updated);
-    getClusterStatus().updateTime = now();
-    return buildRoleRequirementsFromResources();
+    //TODO update cluster description
+    buildRoleRequirementsFromResources();
   }
 
   /**
    * build the role requirements from the cluster specification
    * @return a list of any dynamically added provider roles
    */
-  private List<ProviderRole> buildRoleRequirementsFromResources() throws BadConfigException {
+  private List<ProviderRole> buildRoleRequirementsFromResources()
+      throws BadConfigException {
 
     List<ProviderRole> newRoles = new ArrayList<>(0);
 
     // now update every role's desired count.
     // if there are no instance values, that role count goes to zero
-
-    ConfTreeOperations resources =
-        instanceDefinition.getResourceOperations();
-
     // Add all the existing roles
+    // component name -> number of containers
     Map<String, Integer> groupCounts = new HashMap<>();
+
     for (RoleStatus roleStatus : getRoleStatusMap().values()) {
       if (roleStatus.isExcludeFromFlexing()) {
         // skip inflexible roles, e.g AM itself
@@ -768,10 +418,11 @@ public class AppState {
       long currentDesired = roleStatus.getDesired();
       String role = roleStatus.getName();
       String roleGroup = roleStatus.getGroup();
-      int desiredInstanceCount = getDesiredInstanceCount(resources, roleGroup);
+      Component component = roleStatus.getProviderRole().component;
+      int desiredInstanceCount = component.getNumberOfContainers().intValue();
 
       int newDesired = desiredInstanceCount;
-      if (hasUniqueNames(resources, roleGroup)) {
+      if (component.getUniqueComponentSupport()) {
         Integer groupCount = 0;
         if (groupCounts.containsKey(roleGroup)) {
           groupCount = groupCounts.get(roleGroup);
@@ -793,56 +444,54 @@ public class AppState {
       if (currentDesired != newDesired) {
         log.info("Role {} flexed from {} to {}", role, currentDesired,
             newDesired);
-        roleStatus.setDesired(newDesired);
+        setDesiredContainers(roleStatus, newDesired);
       }
     }
 
     // now the dynamic ones. Iterate through the the cluster spec and
     // add any role status entries not in the role status
-    Set<String> roleNames = resources.getComponentNames();
-    for (String name : roleNames) {
+
+    List<RoleStatus> list = new ArrayList<>(getRoleStatusMap().values());
+    for (RoleStatus roleStatus : list) {
+      String name = roleStatus.getName();
+      Component component = roleStatus.getProviderRole().component;
       if (roles.containsKey(name)) {
         continue;
       }
-      if (hasUniqueNames(resources, name)) {
+      if (component.getUniqueComponentSupport()) {
         // THIS NAME IS A GROUP
-        int desiredInstanceCount = getDesiredInstanceCount(resources, name);
+        int desiredInstanceCount = component.getNumberOfContainers().intValue();
         Integer groupCount = 0;
         if (groupCounts.containsKey(name)) {
           groupCount = groupCounts.get(name);
         }
         for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
-          int priority = resources.getComponentOptInt(name, COMPONENT_PRIORITY, i);
+          int priority = roleStatus.getPriority();
           // this is a new instance of an existing group
           String newName = String.format("%s%d", name, i);
           int newPriority = getNewPriority(priority + i - 1);
           log.info("Adding new role {}", newName);
-          MapOperations component = resources.getComponent(name,
-              Collections.singletonMap(COMPONENT_PRIORITY,
-                  Integer.toString(newPriority)));
-          if (component == null) {
-            throw new BadConfigException("Component is null for name = " + name
-                + ", newPriority =" + newPriority);
-          }
-          ProviderRole dynamicRole = createDynamicProviderRole(newName, name, component);
-          RoleStatus roleStatus = buildRole(dynamicRole);
-          roleStatus.setDesired(1);
-          log.info("New role {}", roleStatus);
+          ProviderRole dynamicRole =
+              createComponent(newName, name, component, newPriority);
+          RoleStatus newRole = buildRole(dynamicRole);
+          incDesiredContainers(newRole);
+          log.info("New role {}", newRole);
           if (roleHistory != null) {
-            roleHistory.addNewRole(roleStatus);
+            roleHistory.addNewRole(newRole);
           }
           newRoles.add(dynamicRole);
         }
       } else {
         // this is a new value
         log.info("Adding new role {}", name);
-        MapOperations component = resources.getComponent(name);
-        ProviderRole dynamicRole = createDynamicProviderRole(name, component);
-        RoleStatus roleStatus = buildRole(dynamicRole);
-        roleStatus.setDesired(getDesiredInstanceCount(resources, name));
-        log.info("New role {}", roleStatus);
+        ProviderRole dynamicRole =
+            createComponent(name, name, component, roleStatus.getPriority());
+        RoleStatus newRole = buildRole(dynamicRole);
+        incDesiredContainers(newRole,
+            component.getNumberOfContainers().intValue());
+        log.info("New role {}", newRole);
         if (roleHistory != null) {
-          roleHistory.addNewRole(roleStatus);
+          roleHistory.addNewRole(newRole);
         }
         newRoles.add(dynamicRole);
       }
@@ -861,37 +510,6 @@ public class AppState {
   }
 
   /**
-   * Get the desired instance count of a role, rejecting negative values
-   * @param resources resource map
-   * @param roleGroup role group
-   * @return the instance count
-   * @throws BadConfigException if the count is negative
-   */
-  private int getDesiredInstanceCount(ConfTreeOperations resources,
-      String roleGroup) throws BadConfigException {
-    int desiredInstanceCount =
-      resources.getComponentOptInt(roleGroup, COMPONENT_INSTANCES, 0);
-
-    if (desiredInstanceCount < 0) {
-      log.error("Role {} has negative desired instances : {}", roleGroup,
-          desiredInstanceCount);
-      throw new BadConfigException(
-          "Negative instance count (%) requested for component %s",
-          desiredInstanceCount, roleGroup);
-    }
-    return desiredInstanceCount;
-  }
-
-  private Boolean hasUniqueNames(ConfTreeOperations resources, String group) {
-    MapOperations component = resources.getComponent(group);
-    if (component == null) {
-      log.info("Component was null for {} when checking unique names", group);
-      return Boolean.FALSE;
-    }
-    return component.getOptionBool(UNIQUE_NAMES, Boolean.FALSE);
-  }
-
-  /**
    * Add knowledge of a role.
    * This is a build-time operation that is not synchronized, and
    * should be used while setting up the system state -before servicing
@@ -923,66 +541,9 @@ public class AppState {
    */
   private void buildRoleResourceRequirements() {
     for (RoleStatus role : roleStatusMap.values()) {
-      role.setResourceRequirements(
-          buildResourceRequirements(role, recordFactory.newResource()));
+      role.setResourceRequirements(buildResourceRequirements(role));
     }
   }
-
-  /**
-   * build up the special master node, which lives
-   * in the live node set but has a lifecycle bonded to the AM
-   * @param containerId the AM master
-   * @param host hostname
-   * @param amPort port
-   * @param nodeHttpAddress http address: may be null
-   */
-  public void buildAppMasterNode(ContainerId containerId,
-                                 String host,
-                                 int amPort,
-                                 String nodeHttpAddress) {
-    Container container = new ContainerPBImpl();
-    container.setId(containerId);
-    NodeId nodeId = NodeId.newInstance(host, amPort);
-    container.setNodeId(nodeId);
-    container.setNodeHttpAddress(nodeHttpAddress);
-    RoleInstance am = new RoleInstance(container);
-    am.role = SliderKeys.COMPONENT_AM;
-    am.group = SliderKeys.COMPONENT_AM;
-    am.roleId = SliderKeys.ROLE_AM_PRIORITY_INDEX;
-    am.createTime =now();
-    am.startTime = am.createTime;
-    appMasterNode = am;
-    //it is also added to the set of live nodes
-    getLiveContainers().put(containerId, am);
-    putOwnedContainer(containerId, am);
-
-    // patch up the role status
-    RoleStatus roleStatus = roleStatusMap.get(SliderKeys.ROLE_AM_PRIORITY_INDEX);
-    roleStatus.setDesired(1);
-    roleStatus.incActual();
-    roleStatus.incStarted();
-  }
-
-  /**
-   * Note that the master node has been launched,
-   * though it isn't considered live until any forked
-   * processes are running. It is NOT registered with
-   * the role history -the container is incomplete
-   * and it will just cause confusion
-   */
-  public void noteAMLaunched() {
-    getLiveContainers().put(appMasterNode.getContainerId(), appMasterNode);
-  }
-
-  /**
-   * AM declares ourselves live in the cluster description.
-   * This is meant to be triggered from the callback
-   * indicating the spawned process is up and running.
-   */
-  public void noteAMLive() {
-    appMasterNode.state = STATE_LIVE;
-  }
-
   /**
    * Look up the status entry of a role or raise an exception
    * @param key role ID
@@ -1008,24 +569,6 @@ public class AppState {
     return lookupRoleStatus(ContainerPriority.extractRole(c));
   }
 
-  /**
-   * Get a deep clone of the role status list. Concurrent events may mean this
-   * list (or indeed, some of the role status entries) may be inconsistent
-   * @return a snapshot of the role status entries
-   */
-  public List<RoleStatus> cloneRoleStatusList() {
-    Collection<RoleStatus> statuses = roleStatusMap.values();
-    List<RoleStatus> statusList = new ArrayList<>(statuses.size());
-    try {
-      for (RoleStatus status : statuses) {
-        statusList.add((RoleStatus)(status.clone()));
-      }
-    } catch (CloneNotSupportedException e) {
-      log.warn("Unexpected cloning failure: {}", e, e);
-    }
-    return statusList;
-  }
-
 
   /**
    * Look up a role in the map
@@ -1278,8 +821,6 @@ public class AppState {
     }
     instance.released = true;
     containersBeingReleased.put(id, instance.container);
-    RoleStatus role = lookupRoleStatus(instance.roleId);
-    role.incReleasing();
     roleHistory.onContainerReleaseSubmitted(container);
   }
 
@@ -1292,10 +833,10 @@ public class AppState {
    * @return the container request to submit or null if there is none
    */
   private AMRMClient.ContainerRequest createContainerRequest(RoleStatus role) {
+    incPendingContainers(role);
     if (role.isAntiAffinePlacement()) {
       return createAAContainerRequest(role);
     } else {
-      incrementRequestCount(role);
       OutstandingRequest request = roleHistory.requestContainerForRole(role);
       if (request != null) {
         return request.getIssuedRequest();
@@ -1318,69 +859,69 @@ public class AppState {
     if (request == null) {
       return null;
     }
-    incrementRequestCount(role);
     role.setOutstandingAArequest(request);
     return request.getIssuedRequest();
   }
 
-  /**
-   * Increment the request count of a role.
-   * <p>
-   *   Also updates application state counters
-   * @param role role being requested.
-   */
-  protected void incrementRequestCount(RoleStatus role) {
-    role.incRequested();
-    incOutstandingContainerRequests();
+  private void incPendingContainers(RoleStatus role) {
+    role.getComponentMetrics().containersPending.incr();
+    appMetrics.containersPending.incr();
   }
 
-  /**
-   * Inc #of outstanding requests.
-   */
-  private void incOutstandingContainerRequests() {
-     outstandingContainerRequests.inc();
+  private void decPendingContainers(RoleStatus role) {
+    decPendingContainers(role, 1);
   }
 
-  /**
-   * Decrement the number of outstanding requests. This never goes below zero.
-   */
-  private void decOutstandingContainerRequests() {
-    synchronized (outstandingContainerRequests) {
-      if (outstandingContainerRequests.getCount() > 0) {
-        // decrement but never go below zero
-        outstandingContainerRequests.dec();
-      }
-    }
+  private void decPendingContainers(RoleStatus role, int n) {
+    role.getComponentMetrics().containersPending.decr(n);
+    appMetrics.containersPending.decr(n);
   }
 
 
-  /**
-   * Get the value of a YARN requirement (cores, RAM, etc).
-   * These are returned as integers, but there is special handling of the 
-   * string {@link ResourceKeys#YARN_RESOURCE_MAX}, which triggers
-   * the return of the maximum value.
-   * @param group component to get from
-   * @param option option name
-   * @param defVal default value
-   * @param maxVal value to return if the max val is requested
-   * @return parsed value
-   * @throws NumberFormatException if the role could not be parsed.
-   */
-  private int getResourceRequirement(ConfTreeOperations resources,
-                                     String group,
-                                     String option,
-                                     int defVal,
-                                     int maxVal) {
+  private void incRunningContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRunning.incr();
+    appMetrics.containersRunning.incr();
+  }
 
-    String val = resources.getComponentOpt(group, option,
-        Integer.toString(defVal));
-    Integer intVal;
-    if (YARN_RESOURCE_MAX.equals(val)) {
-      intVal = maxVal;
-    } else {
-      intVal = Integer.decode(val);
+  private void decRunningContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRunning.decr();
+    appMetrics.containersRunning.decr();
+  }
+
+  private void setDesiredContainers(RoleStatus role, int n) {
+    role.getComponentMetrics().containersDesired.set(n);
+    appMetrics.containersDesired.set(n);
+  }
+
+  private void incDesiredContainers(RoleStatus role) {
+    role.getComponentMetrics().containersDesired.incr();
+    appMetrics.containersDesired.incr();
+  }
+
+  private void incDesiredContainers(RoleStatus role, int n) {
+    role.getComponentMetrics().containersDesired.incr(n);
+    appMetrics.containersDesired.incr(n);
+  }
+
+  private void incCompletedContainers(RoleStatus role) {
+    role.getComponentMetrics().containersCompleted.incr();
+    appMetrics.containersCompleted.incr();
+  }
+
+  private void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
+    role.getComponentMetrics().containersFailed.incr();
+    appMetrics.containersFailed.incr();
+    switch (outcome) {
+    case Preempted:
+      appMetrics.containersPreempted.incr();
+      role.getComponentMetrics().containersPreempted.incr();
+      break;
+    case Failed:
+      appMetrics.failedSinceLastThreshold.incr();
+      break;
+    default:
+      break;
     }
-    return intVal;
   }
 
   /**
@@ -1388,26 +929,28 @@ public class AppState {
    * cluster specification, including substituing max allowed values
    * if the specification asked for it.
    * @param role role
-   * @param capability capability to set up. A new one may be created
    * during normalization
    */
-  public Resource buildResourceRequirements(RoleStatus role, Resource capability) {
+  public Resource buildResourceRequirements(RoleStatus role) {
     // Set up resource requirements from role values
     String name = role.getName();
-    String group = role.getGroup();
-    ConfTreeOperations resources = getResourcesSnapshot();
-    int cores = getResourceRequirement(resources,
-                                       group,
-                                       YARN_CORES,
-                                       DEF_YARN_CORES,
-                                       containerMaxCores);
-    capability.setVirtualCores(cores);
-    int ram = getResourceRequirement(resources, group,
-                                     YARN_MEMORY,
-                                     DEF_YARN_MEMORY,
-                                     containerMaxMemory);
-    capability.setMemory(ram);
-    log.debug("Component {} has RAM={}, vCores ={}", name, ram, cores);
+    Component component = role.getProviderRole().component;
+    if (component == null) {
+      // AM container default: Resource.newInstance(memory, vCores) = 512MB, 1 core
+      // TODO why do we need to create the component for AM ?
+      return Resource.newInstance(512, 1);
+    }
+    int cores = Math.min(containerMaxCores, component.getResource().getCpus());
+    if (cores <= 0) {
+      cores = DEF_YARN_CORES;
+    }
+    long mem = Math.min(containerMaxMemory,
+        Long.parseLong(component.getResource().getMemory()));
+    if (mem <= 0) {
+      mem = DEF_YARN_MEMORY;
+    }
+    Resource capability = Resource.newInstance(mem, cores);
+    log.debug("Component {} has RAM={}, vCores ={}", name, mem, cores);
     Resource normalized = recordFactory.normalize(capability, minResource,
         maxResource);
     if (!Resources.equals(normalized, capability)) {
@@ -1459,7 +1002,6 @@ public class AppState {
    */
   @VisibleForTesting
   public RoleInstance innerOnNodeManagerContainerStarted(ContainerId containerId) {
-    incStartedCountainerCount();
     RoleInstance instance = getOwnedContainer(containerId);
     if (instance == null) {
       //serious problem
@@ -1477,8 +1019,6 @@ public class AppState {
         "Container "+ containerId +" is already started");
     }
     instance.state = STATE_LIVE;
-    RoleStatus roleStatus = lookupRoleStatus(instance.roleId);
-    roleStatus.incStarted();
     Container container = instance.container;
     addLaunchedContainer(container, instance);
     return instance;
@@ -1497,8 +1037,6 @@ public class AppState {
   public synchronized void onNodeManagerContainerStartFailed(ContainerId containerId,
                                                              Throwable thrown) {
     removeOwnedContainer(containerId);
-    incFailedCountainerCount();
-    incStartFailedCountainerCount();
     RoleInstance instance = getStartingContainers().remove(containerId);
     if (null != instance) {
       RoleStatus roleStatus = lookupRoleStatus(instance.roleId);
@@ -1509,9 +1047,10 @@ public class AppState {
         text = "container start failure";
       }
       instance.diagnostics = text;
-      roleStatus.noteFailed(true, text, ContainerOutcome.Failed);
+      roleStatus.noteFailed(text);
       getFailedContainers().put(containerId, instance);
       roleHistory.onNodeManagerContainerStartFailed(instance.container);
+      incFailedContainers(roleStatus, ContainerOutcome.Failed);
     }
   }
 
@@ -1607,7 +1146,8 @@ public class AppState {
    * @param status the node that has just completed
    * @return NodeCompletionResult
    */
-  public synchronized NodeCompletionResult onCompletedNode(ContainerStatus status) {
+  public synchronized NodeCompletionResult onCompletedContainer(
+      ContainerStatus status) {
     ContainerId containerId = status.getContainerId();
     NodeCompletionResult result = new NodeCompletionResult();
     RoleInstance roleInstance;
@@ -1618,18 +1158,16 @@ public class AppState {
       log.info("Container was queued for release : {}", containerId);
       Container container = containersBeingReleased.remove(containerId);
       RoleStatus roleStatus = lookupRoleStatus(container);
-      long releasing = roleStatus.decReleasing();
-      long actual = roleStatus.decActual();
-      long completedCount = roleStatus.incCompleted();
-      log.info("decrementing role count for role {} to {}; releasing={}, completed={}",
+      decRunningContainers(roleStatus);
+      incCompletedContainers(roleStatus);
+      log.info("decrementing role count for role {} to {}; completed={}",
           roleStatus.getName(),
-          actual,
-          releasing,
-          completedCount);
+          roleStatus.getComponentMetrics().containersRunning.value(),
+          roleStatus.getComponentMetrics().containersCompleted.value());
       result.outcome = ContainerOutcome.Completed;
       roleHistory.onReleaseCompleted(container);
 
-    } else if (surplusNodes.remove(containerId)) {
+    } else if (surplusContainers.remove(containerId)) {
       //its a surplus one being purged
       result.surplusNode = true;
     } else {
@@ -1640,8 +1178,8 @@ public class AppState {
 
       roleInstance = removeOwnedContainer(containerId);
       if (roleInstance != null) {
-        //it was active, move it to failed 
-        incFailedCountainerCount();
+        RoleStatus roleStatus = lookupRoleStatus(roleInstance.roleId);
+        incFailedContainers(roleStatus, result.outcome);
         failedContainers.put(containerId, roleInstance);
       } else {
         // the container may have been noted as failed already, so look
@@ -1653,8 +1191,8 @@ public class AppState {
         String rolename = roleInstance.role;
         log.info("Failed container in role[{}] : {}", roleId, rolename);
         try {
-          RoleStatus roleStatus = lookupRoleStatus(roleId);
-          roleStatus.decActual();
+          RoleStatus roleStatus = lookupRoleStatus(roleInstance.roleId);
+          decRunningContainers(roleStatus);
           boolean shortLived = isShortLived(roleInstance);
           String message;
           Container failedContainer = roleInstance.container;
@@ -1670,8 +1208,10 @@ public class AppState {
           } else {
             message = String.format("Failure %s (%d)", containerId, exitStatus);
           }
-          roleStatus.noteFailed(shortLived, message, result.outcome);
-          long failed = roleStatus.getFailed();
+          roleStatus.noteFailed(message);
+          incFailedContainers(roleStatus, result.outcome);
+          long failed =
+              roleStatus.getComponentMetrics().containersFailed.value();
           log.info("Current count of failed role[{}] {} =  {}",
               roleId, rolename, failed);
           if (failedContainer != null) {
@@ -1761,7 +1301,7 @@ public class AppState {
     float actual = 0;
     for (RoleStatus role : getRoleStatusMap().values()) {
       desired += role.getDesired();
-      actual += role.getActual();
+      actual += role.getRunning();
     }
     if (desired == 0) {
       percentage = 100;
@@ -1771,29 +1311,26 @@ public class AppState {
     return percentage;
   }
 
+
   /**
    * Update the cluster description with the current application state
    */
 
-  public ClusterDescription refreshClusterStatus() {
-    return refreshClusterStatus(null);
-  }
+  public synchronized Application refreshClusterStatus() {
+
+    //TODO replace ClusterDescription with Application + related statistics
+    //TODO build container stats
+    app.setState(ApplicationState.STARTED);
+    return app;
+/*
+    return app;
 
-  /**
-   * Update the cluster description with the current application state
-   * @param providerStatus status from the provider for the cluster info section
-   */
-  public synchronized ClusterDescription refreshClusterStatus(Map<String, String> providerStatus) {
     ClusterDescription cd = getClusterStatus();
     long now = now();
     cd.setInfoTime(StatusKeys.INFO_STATUS_TIME_HUMAN,
                    StatusKeys.INFO_STATUS_TIME_MILLIS,
                    now);
-    if (providerStatus != null) {
-      for (Map.Entry<String, String> entry : providerStatus.entrySet()) {
-        cd.setInfo(entry.getKey(), entry.getValue());
-      }
-    }
+
     MapOperations infoOps = new MapOperations("info", cd.info);
     infoOps.mergeWithoutOverwrite(applicationInfo);
     SliderUtils.addBuildInfo(infoOps, "status");
@@ -1810,32 +1347,8 @@ public class AppState {
     cd.status = new HashMap<>();
     cd.status.put(ClusterDescriptionKeys.KEY_CLUSTER_LIVE, clusterNodes);
 
-
     for (RoleStatus role : getRoleStatusMap().values()) {
       String rolename = role.getName();
-      if (hasUniqueNames(instanceDefinition.getResourceOperations(),
-          role.getGroup())) {
-        cd.setRoleOpt(rolename, COMPONENT_PRIORITY, role.getPriority());
-        cd.setRoleOpt(rolename, ROLE_GROUP, role.getGroup());
-        MapOperations groupOptions = instanceDefinition.getResourceOperations()
-            .getComponent(role.getGroup());
-        SliderUtils.mergeMapsIgnoreDuplicateKeys(cd.getRole(rolename),
-            groupOptions.options);
-      }
-      String prefix = instanceDefinition.getAppConfOperations()
-          .getComponentOpt(role.getGroup(), ROLE_PREFIX, null);
-      if (SliderUtils.isSet(prefix)) {
-        cd.setRoleOpt(rolename, ROLE_PREFIX, SliderUtils.trimPrefix(prefix));
-      }
-      String dockerImage = instanceDefinition.getAppConfOperations()
-          .getComponentOpt(role.getGroup(), DOCKER_IMAGE, null);
-      if (SliderUtils.isSet(dockerImage)) {
-        cd.setRoleOpt(rolename, DOCKER_IMAGE, dockerImage);
-        Boolean dockerUsePrivileged = instanceDefinition.getAppConfOperations()
-            .getComponentOptBool(role.getGroup(), DOCKER_USE_PRIVILEGED,
-                DEFAULT_DOCKER_USE_PRIVILEGED);
-        cd.setRoleOpt(rolename, DOCKER_USE_PRIVILEGED, dockerUsePrivileged);
-      }
       List<String> instances = instanceMap.get(rolename);
       int nodeCount = instances != null ? instances.size(): 0;
       cd.setRoleOpt(rolename, COMPONENT_INSTANCES,
@@ -1861,7 +1374,7 @@ public class AppState {
     // liveness
     cd.liveness = getApplicationLivenessInformation();
 
-    return cd;
+    return cd;*/
   }
 
   /**
@@ -1878,29 +1391,6 @@ public class AppState {
     return li;
   }
 
-  /**
-   * Get the live statistics map
-   * @return a map of statistics values, defined in the {@link StatusKeys}
-   * keylist.
-   */
-  protected Map<String, Integer> getLiveStatistics() {
-    Map<String, Integer> sliderstats = new HashMap<>();
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_LIVE,
-        liveNodes.size());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_COMPLETED,
-        completedContainerCount.intValue());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_FAILED,
-        failedContainerCount.intValue());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_STARTED,
-        startedContainers.intValue());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_START_FAILED,
-         startFailedContainerCount.intValue());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_SURPLUS,
-        surplusContainers.intValue());
-    sliderstats.put(StatusKeys.STATISTICS_CONTAINERS_UNKNOWN_COMPLETED,
-        completionOfUnknownContainerEvent.get());
-    return sliderstats;
-  }
 
   /**
    * Get the aggregate statistics across all roles
@@ -1949,7 +1439,7 @@ public class AppState {
    */
   public synchronized List<AbstractRMOperation> reviewRequestAndReleaseNodes()
       throws SliderInternalStateException, TriggerClusterTeardownException {
-    log.debug("in reviewRequestAndReleaseNodes()");
+    log.info("in reviewRequestAndReleaseNodes()");
     List<AbstractRMOperation> allOperations = new ArrayList<>();
     AbstractRMOperation blacklistOperation = updateBlacklist();
     if (blacklistOperation != null) {
@@ -1981,15 +1471,11 @@ public class AppState {
 
     if (failures > threshold) {
       throw new TriggerClusterTeardownException(
-        SliderExitCodes.EXIT_DEPLOYMENT_FAILED,
-          FinalApplicationStatus.FAILED, ErrorStrings.E_UNSTABLE_CLUSTER +
-        " - failed with component %s failed 'recently' %d times (%d in startup);" +
-        " threshold is %d - last failure: %s",
-          role.getName(),
-        role.getFailed(),
-        role.getStartFailed(),
-          threshold,
-        role.getFailureMessage());
+          SliderExitCodes.EXIT_DEPLOYMENT_FAILED, FinalApplicationStatus.FAILED,
+          ErrorStrings.E_UNSTABLE_CLUSTER
+              + " - failed with component %s failed 'recently' %d times;"
+              + " threshold is %d - last failure: %s", role.getName(),
+          role.getFailedRecently(), threshold, role.getFailureMessage());
     }
   }
 
@@ -2000,26 +1486,11 @@ public class AppState {
    * @return the threshold for failures
    */
   private int getFailureThresholdForRole(RoleStatus roleStatus) {
-    ConfTreeOperations resources =
-        instanceDefinition.getResourceOperations();
-    return resources.getComponentOptInt(roleStatus.getGroup(),
-        CONTAINER_FAILURE_THRESHOLD,
-        failureThreshold);
+    return (int) roleStatus.getProviderRole().component.getConfiguration()
+        .getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
+            DEFAULT_CONTAINER_FAILURE_THRESHOLD);
   }
 
-  /**
-   * Get the node failure threshold for a specific role, falling back to
-   * the global one if not
-   * @param roleGroup role group
-   * @return the threshold for failures
-   */
-  private int getNodeFailureThresholdForRole(String roleGroup) {
-    ConfTreeOperations resources =
-        instanceDefinition.getResourceOperations();
-    return resources.getComponentOptInt(roleGroup,
-                                        NODE_FAILURE_THRESHOLD,
-                                        nodeFailureThreshold);
-  }
 
   /**
    * Reset the "recent" failure counts of all roles
@@ -2027,9 +1498,9 @@ public class AppState {
   public void resetFailureCounts() {
     for (RoleStatus roleStatus : getRoleStatusMap().values()) {
       long failed = roleStatus.resetFailedRecently();
-      log.info("Resetting failure count of {}; was {}",
-               roleStatus.getName(),
+      log.info("Resetting failure count of {}; was {}", roleStatus.getName(),
           failed);
+
     }
     roleHistory.resetFailedRecently();
   }
@@ -2075,6 +1546,7 @@ public class AppState {
   @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
   private List<AbstractRMOperation> reviewOneRole(RoleStatus role)
       throws SliderInternalStateException, TriggerClusterTeardownException {
+    log.info("review one role " + role.getName());
     List<AbstractRMOperation> operations = new ArrayList<>();
     long delta;
     long expected;
@@ -2123,7 +1595,8 @@ public class AppState {
           log.warn("Awaiting node map before generating anti-affinity requests");
         }
         log.info("Setting pending to {}", pending);
-        role.setPendingAntiAffineRequests(pending);
+        //TODO
+        role.setAAPending((int)pending);
       } else {
 
         for (int i = 0; i < delta; i++) {
@@ -2139,7 +1612,7 @@ public class AppState {
       long excess = -delta;
 
       // how many requests are outstanding? for AA roles, this includes pending
-      long outstandingRequests = role.getRequested() + role.getPendingAntiAffineRequests();
+      long outstandingRequests = role.getPending() + role.getAAPending();
       if (outstandingRequests > 0) {
         // outstanding requests.
         int toCancel = (int)Math.min(outstandingRequests, excess);
@@ -2153,8 +1626,7 @@ public class AppState {
               " expected to be able to cancel {} requests, but got {}",
               toCancel, cancellations.size());
         }
-
-        role.cancel(toCancel);
+        decPendingContainers(role, toCancel);
         excess -= toCancel;
         assert excess >= 0 : "Attempted to cancel too many requests";
         log.info("Submitted {} cancellations, leaving {} to release",
@@ -2215,9 +1687,9 @@ public class AppState {
     } else {
       // actual + requested == desired
       // there's a special case here: clear all pending AA requests
-      if (role.getPendingAntiAffineRequests() > 0) {
+      if (role.getAAPending() > 0) {
         log.debug("Clearing outstanding pending AA requests");
-        role.setPendingAntiAffineRequests(0);
+        role.setAAPending(0);
       }
     }
 
@@ -2269,28 +1741,6 @@ public class AppState {
   }
 
   /**
-   * Find a container running on a specific host -looking
-   * into the node ID to determine this.
-   *
-   * @param node node
-   * @param roleId role the container must be in
-   * @return a container or null if there are no containers on this host
-   * that can be released.
-   */
-  private RoleInstance findRoleInstanceOnHost(NodeInstance node, int roleId) {
-    Collection<RoleInstance> targets = cloneOwnedContainerList();
-    String hostname = node.hostname;
-    for (RoleInstance ri : targets) {
-      if (hostname.equals(RoleHistoryUtils.hostnameOf(ri.container))
-                         && ri.roleId == roleId
-        && containersBeingReleased.get(ri.getContainerId()) == null) {
-        return ri;
-      }
-    }
-    return null;
-  }
-
-  /**
    * Release all containers.
    * @return a list of operations to execute
    */
@@ -2329,26 +1779,25 @@ public class AppState {
    * @param assignments the assignments of roles to containers
    * @param operations any allocation or release operations
    */
-  public synchronized void onContainersAllocated(List<Container> allocatedContainers,
-                                    List<ContainerAssignment> assignments,
-                                    List<AbstractRMOperation> operations) {
-    assignments.clear();
-    operations.clear();
+  public synchronized void onContainersAllocated(
+      List<Container> allocatedContainers,
+      List<ContainerAssignment> assignments,
+      List<AbstractRMOperation> operations) {
     List<Container> ordered = roleHistory.prepareAllocationList(allocatedContainers);
-    log.debug("onContainersAllocated(): Total containers allocated = {}", ordered.size());
+    log.info("onContainersAllocated(): Total containers allocated = {}", ordered.size());
     for (Container container : ordered) {
       final NodeId nodeId = container.getNodeId();
       String containerHostInfo = nodeId.getHost() + ":" + nodeId.getPort();
       //get the role
       final ContainerId cid = container.getId();
       final RoleStatus role = lookupRoleStatus(container);
-
-      //dec requested count
-      role.decRequested();
+      decPendingContainers(role);
 
       //inc allocated count -this may need to be dropped in a moment,
       // but us needed to update the logic below
-      final long allocated = role.incActual();
+      MutableGaugeInt containersRunning = role.getComponentMetrics().containersRunning;
+      final long allocated = containersRunning.value();
+      incRunningContainers(role);
       final long desired = role.getDesired();
 
       final String roleName = role.getName();
@@ -2364,22 +1813,12 @@ public class AppState {
         log.info("Discarding surplus {} container {} on {}", roleName,  cid, containerHostInfo);
         operations.add(new ContainerReleaseOperation(cid));
         //register as a surplus node
-        surplusNodes.add(cid);
-        surplusContainers.inc();
-        //and, as we aren't binding it to role, dec that role's actual count
-        role.decActual();
+        surplusContainers.add(cid);
+        role.getComponentMetrics().surplusContainers.incr();
+        containersRunning.decr();
       } else {
-
-        // Allocation being accepted -so decrement the number of outstanding requests
-        decOutstandingContainerRequests();
-
-        log.info("Assigning role {} to container" +
-                 " {}," +
-                 " on {}:{},",
-                 roleName,
-                 cid,
-                 nodeId.getHost(),
-                 nodeId.getPort());
+        log.info("Assigning role {} to container" + " {}," + " on {}:{},",
+            roleName, cid, nodeId.getHost(), nodeId.getPort());
 
         assignments.add(new ContainerAssignment(container, role, outcome));
         //add to the history
@@ -2392,13 +1831,13 @@ public class AppState {
           if (node.canHost(role.getKey(), role.getLabelExpression())) {
             log.error("Assigned node still declares as available {}", node.toFullString() );
           }
-          if (role.getPendingAntiAffineRequests() > 0) {
+          if (role.getAAPending() > 0) {
             // still an outstanding AA request: need to issue a new one.
             log.info("Asking for next container for AA role {}", roleName);
             if (!addContainerRequest(operations, createAAContainerRequest(role))) {
               log.info("No capacity in cluster for new requests");
             } else {
-              role.decPendingAntiAffineRequests();
+              role.decAAPending();
             }
             log.debug("Current AA role status {}", role);
           } else {
@@ -2437,8 +1876,7 @@ public class AppState {
     for (Container container : liveContainers) {
       addRestartedContainer(container);
     }
-    clusterStatus.setInfo(StatusKeys.INFO_CONTAINERS_AM_RESTART,
-                               Integer.toString(liveContainers.size()));
+    app.setNumberOfRunningContainers((long)liveContainers.size());
     return true;
   }
 
@@ -2458,10 +1896,9 @@ public class AppState {
     
     // get the role
     int roleId = ContainerPriority.extractRole(container);
-    RoleStatus role =
-      lookupRoleStatus(roleId);
+    RoleStatus role = lookupRoleStatus(roleId);
     // increment its count
-    role.incActual();
+    incRunningContainers(role);
     String roleName = role.getName();
     
     log.info("Rebuilding container {} in role {} on {},",
@@ -2495,12 +1932,6 @@ public class AppState {
     final StringBuilder sb = new StringBuilder("AppState{");
     sb.append("applicationLive=").append(applicationLive);
     sb.append(", live nodes=").append(liveNodes.size());
-    sb.append(", startedContainers=").append(startedContainers);
-    sb.append(", startFailedContainerCount=").append(startFailedContainerCount);
-    sb.append(", surplusContainers=").append(surplusContainers);
-    sb.append(", failedContainerCount=").append(failedContainerCount);
-    sb.append(", outstanding non-AA Container Requests=")
-        .append(outstandingContainerRequests);
     sb.append('}');
     return sb.toString();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
index a8aa1a2..2dfded8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.providers.ProviderRole;
 
 import java.util.ArrayList;
@@ -38,26 +38,24 @@ import java.util.Map;
  * are added.
  */
 public class AppStateBindingInfo {
-  public AggregateConf instanceDefinition;
   public Configuration serviceConfig = new Configuration();
-  public Configuration publishedProviderConf = new Configuration(false);
+  public Application application = null;
   public List<ProviderRole> roles = new ArrayList<>();
   public FileSystem fs;
   public Path historyPath;
   public List<Container> liveContainers = new ArrayList<>(0);
-  public Map<String, String> applicationInfo = new HashMap<>();
   public ContainerReleaseSelector releaseSelector = new SimpleReleaseSelector();
   /** node reports off the RM. */
   public List<NodeReport> nodeReports = new ArrayList<>(0);
 
   public void validate() throws IllegalArgumentException {
-    Preconditions.checkArgument(instanceDefinition != null, "null instanceDefinition");
     Preconditions.checkArgument(serviceConfig != null, "null appmasterConfig");
-    Preconditions.checkArgument(publishedProviderConf != null, "null publishedProviderConf");
     Preconditions.checkArgument(releaseSelector != null, "null releaseSelector");
     Preconditions.checkArgument(roles != null, "null providerRoles");
     Preconditions.checkArgument(fs != null, "null fs");
     Preconditions.checkArgument(historyPath != null, "null historyDir");
     Preconditions.checkArgument(nodeReports != null, "null nodeReports");
+    Preconditions.checkArgument(application != null, "null application");
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
index 37e9a7f..8046472 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
@@ -21,14 +21,12 @@ package org.apache.slider.server.appmaster.state;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.NodeInformation;
 import org.apache.slider.api.types.RoleStatistics;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.registry.docstore.PublishedConfigSet;
 import org.apache.slider.core.registry.docstore.PublishedExportsSet;
@@ -130,46 +128,16 @@ public class ProviderAppState implements StateAccessForProviders {
   }
 
   @Override
-  public ClusterDescription getClusterStatus() {
+  public Application getApplication() {
     return appState.getClusterStatus();
   }
 
   @Override
-  public ConfTreeOperations getResourcesSnapshot() {
-    return appState.getResourcesSnapshot();
-  }
-
-  @Override
-  public ConfTreeOperations getAppConfSnapshot() {
-    return appState.getAppConfSnapshot();
-  }
-
-  @Override
-  public ConfTreeOperations getInternalsSnapshot() {
-    return appState.getInternalsSnapshot();
-  }
-
-  @Override
   public boolean isApplicationLive() {
     return appState.isApplicationLive();
   }
 
   @Override
-  public long getSnapshotTime() {
-    return appState.getSnapshotTime();
-  }
-
-  @Override
-  public AggregateConf getInstanceDefinitionSnapshot() {
-    return appState.getInstanceDefinitionSnapshot();
-  }
-  
-  @Override
-  public AggregateConf getUnresolvedInstanceDefinition() {
-    return appState.getUnresolvedInstanceDefinition();
-  }
-
-  @Override
   public RoleStatus lookupRoleStatus(int key) {
     return appState.lookupRoleStatus(key);
   }
@@ -221,26 +189,16 @@ public class ProviderAppState implements StateAccessForProviders {
   }
 
   @Override
-  public ClusterDescription refreshClusterStatus() {
+  public Application refreshClusterStatus() {
     return appState.refreshClusterStatus();
   }
 
   @Override
-  public List<RoleStatus> cloneRoleStatusList() {
-    return appState.cloneRoleStatusList();
-  }
-
-  @Override
   public ApplicationLivenessInformation getApplicationLivenessInformation() {
     return appState.getApplicationLivenessInformation();
   }
 
   @Override
-  public Map<String, Integer> getLiveStatistics() {
-    return appState.getLiveStatistics();
-  }
-
-  @Override
   public Map<String, ComponentInformation> getComponentInfoSnapshot() {
     return appState.getComponentInfoSnapshot();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
index 38c70f3..b6c3675 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleHistory.java
@@ -135,17 +135,6 @@ public class RoleHistory {
     outstandingRequests = new OutstandingRequestTracker();
   }
 
-  /**
-   * Register all metrics with the metrics infra
-   * @param metrics metrics
-   */
-  public void register(MetricsAndMonitoring metrics) {
-    metrics.register(RoleHistory.class, dirty, "dirty");
-    metrics.register(RoleHistory.class, nodesUpdatedTime, "nodes-updated.time");
-    metrics.register(RoleHistory.class, nodeUpdateReceived, "nodes-updated.flag");
-    metrics.register(RoleHistory.class, thawedDataTime, "thawed.time");
-    metrics.register(RoleHistory.class, saveTime, "saved.time");
-  }
 
   /**
    * safety check: make sure the role is unique amongst
@@ -1102,13 +1091,13 @@ public class RoleHistory {
     int roleId = role.getKey();
     List<OutstandingRequest> requests = new ArrayList<>(toCancel);
     // there may be pending requests which can be cancelled here
-    long pending = role.getPendingAntiAffineRequests();
+    long pending = role.getAAPending();
     if (pending > 0) {
       // there are some pending ones which can be cancelled first
       long pendingToCancel = Math.min(pending, toCancel);
       log.info("Cancelling {} pending AA allocations, leaving {}", toCancel,
           pendingToCancel);
-      role.setPendingAntiAffineRequests(pending - pendingToCancel);
+      role.setAAPending(pending - pendingToCancel);
       toCancel -= pendingToCancel;
     }
     if (toCancel > 0 && role.isAARequestOutstanding()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
index 30cfec9..de52f4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
@@ -29,6 +29,7 @@ import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.providers.ProviderRole;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -40,6 +41,7 @@ import java.util.List;
 public final class RoleInstance implements Cloneable {
 
   public Container container;
+  public ProviderRole providerRole;
   /**
    * Container ID
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java
new file mode 100644
index 0000000..3e56a72
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test execution environment.
+ */
+public class TestExecutionEnvironment extends SliderTestBase {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(TestExecutionEnvironment.class);
+
+  @Test
+  public void testClientEnv() throws Throwable {
+    SliderUtils.validateSliderClientEnvironment(LOG);
+  }
+
+  @Test
+  public void testWinutils() throws Throwable {
+    SliderUtils.maybeVerifyWinUtilsValid();
+  }
+
+  @Test
+  public void testServerEnv() throws Throwable {
+    SliderUtils.validateSliderServerEnvironment(LOG, true);
+  }
+
+  @Test
+  public void testServerEnvNoDependencies() throws Throwable {
+    SliderUtils.validateSliderServerEnvironment(LOG, false);
+  }
+
+  @Test
+  public void testopenSSLEnv() throws Throwable {
+    SliderUtils.validateOpenSSLEnv(LOG);
+  }
+
+  @Test
+  public void testValidatePythonEnv() throws Throwable {
+    SliderUtils.validatePythonEnv(LOG);
+  }
+
+  @Test
+  public void testNativeLibs() throws Throwable {
+    assertNativeLibrariesPresent();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
new file mode 100644
index 0000000..bf6ee2c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+import java.net.URI;
+
+/**
+ * Test slider utils.
+ */
+public class TestMiscSliderUtils extends SliderTestBase {
+
+
+  public static final String CLUSTER1 = "cluster1";
+
+  @Test
+  public void testPurgeTempDir() throws Throwable {
+
+    Configuration configuration = new Configuration();
+    FileSystem fs = FileSystem.get(new URI("file:///"), configuration);
+    SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
+    Path inst = sliderFileSystem.createAppInstanceTempPath(CLUSTER1, "001");
+
+    assertTrue(fs.exists(inst));
+    sliderFileSystem.purgeAppInstanceTempFiles(CLUSTER1);
+    assertFalse(fs.exists(inst));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java
new file mode 100644
index 0000000..0953a8b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.junit.Test;
+
+import java.net.ServerSocket;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+/**
+ * Test finding a port in a range.
+ */
+public class TestPortScan {
+
+  @Test
+  public void testScanPorts() throws Throwable {
+
+    ServerSocket server = new ServerSocket(0);
+
+    try {
+      int serverPort = server.getLocalPort();
+      assertFalse(SliderUtils.isPortAvailable(serverPort));
+      int port = SliderUtils.findFreePort(serverPort, 10);
+      assertTrue(port > 0 && serverPort < port);
+    } finally {
+      server.close();
+    }
+  }
+
+  @Test
+  public void testRequestedPortsLogic() throws Throwable {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("5,6,8-10, 11,14 ,20 - 22");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts =
+        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
+    assertEquals(expectedPorts, ports);
+  }
+
+  @Test
+  public void testRequestedPortsOutOfOrder() throws Throwable {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("8-10,5,6, 11,20 - 22, 14 ");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts =
+        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
+    assertEquals(expectedPorts, ports);
+  }
+
+  @Test
+  public void testFindAvailablePortInRange() throws Throwable {
+    ServerSocket server = new ServerSocket(0);
+    try {
+      int serverPort = server.getLocalPort();
+
+      PortScanner portScanner = new PortScanner();
+      portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
+      int port = portScanner.getAvailablePort();
+      assertNotEquals(port, serverPort);
+      assertTrue(port >= serverPort -1 && port <= serverPort + 3);
+    } finally {
+      server.close();
+    }
+  }
+
+  @Test
+  public void testFindAvailablePortInList() throws Throwable {
+    ServerSocket server = new ServerSocket(0);
+    try {
+      int serverPort = server.getLocalPort();
+
+      PortScanner portScanner = new PortScanner();
+      portScanner.setPortRange("" + (serverPort-1) + ", " + (serverPort + 1));
+      int port = portScanner.getAvailablePort();
+      assertNotEquals(port, serverPort);
+      assertTrue(port == serverPort -1 || port == serverPort + 1);
+    } finally {
+      server.close();
+    }
+  }
+
+  @Test
+  public void testNoAvailablePorts() throws Throwable {
+    ServerSocket server1 = new ServerSocket(0);
+    ServerSocket server2 = new ServerSocket(0);
+    try {
+      int serverPort1 = server1.getLocalPort();
+      int serverPort2 = server2.getLocalPort();
+
+      PortScanner portScanner = new PortScanner();
+      portScanner.setPortRange("" + serverPort1+ ", " + serverPort2);
+      try {
+        portScanner.getAvailablePort();
+        fail("expected SliderException");
+      } catch (SliderException e) {
+        // expected
+      }
+    } finally {
+      server1.close();
+      server2.close();
+    }
+  }
+
+  @Test
+  public void testPortRemovedFromRange() throws Throwable {
+    ServerSocket server = new ServerSocket(0);
+    try {
+      int serverPort = server.getLocalPort();
+
+      PortScanner portScanner = new PortScanner();
+      portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
+      int port = portScanner.getAvailablePort();
+      assertNotEquals(port, serverPort);
+      assertTrue(port >= serverPort -1 && port <= serverPort + 3);
+      assertFalse(portScanner.getRemainingPortsToCheck().contains(port));
+    } finally {
+      server.close();
+    }
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testBadRange() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    // note the em dash
+    portScanner.setPortRange("2000–2010");
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testEndBeforeStart() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("2001-2000");
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testEmptyRange() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("");
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testBlankRange() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange(" ");
+  }
+
+  @Test
+  public void testExtraComma() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("2000-2001, ");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts = Arrays.asList(2000, 2001);
+    assertEquals(expectedPorts, ports);
+  }
+
+  @Test
+  public void testExtraCommas() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("2000-2001,, ,2003,");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts = Arrays.asList(2000, 2001, 2003);
+    assertEquals(expectedPorts, ports);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java
new file mode 100644
index 0000000..dbb7791
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+/**
+ * Test slider file system.
+ */
+public class TestSliderFileSystem extends SliderTestBase {
+  private static Configuration defaultConfiguration() {
+    return new Configuration();
+  }
+
+  private static Configuration createConfigurationWithKV(String key, String
+      value) {
+    Configuration conf = defaultConfiguration();
+    conf.set(key, value);
+    return conf;
+  }
+
+  @Test
+  public void testSliderBasePathDefaultValue() throws Throwable {
+    Configuration configuration = defaultConfiguration();
+    FileSystem fileSystem = FileSystem.get(configuration);
+
+    SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
+    assertEquals(fs2.getBaseApplicationPath(), new Path(fileSystem
+        .getHomeDirectory(), ".slider"));
+  }
+
+  @Test
+  public void testSliderBasePathCustomValue() throws Throwable {
+    Configuration configuration = createConfigurationWithKV(SliderXmlConfKeys
+        .KEY_SLIDER_BASE_PATH, "/slider/cluster");
+    FileSystem fileSystem = FileSystem.get(configuration);
+    SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
+
+    assertEquals(fs2.getBaseApplicationPath(), new Path("/slider/cluster"));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java
new file mode 100644
index 0000000..a6e7db8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.utils.SliderTestUtils;
+import org.junit.Test;
+import org.junit.internal.AssumptionViolatedException;
+
+/**
+ * Test slider test utils.
+ */
+public class TestSliderTestUtils extends SliderTestUtils {
+
+  @Test
+  public void testAssumeTrue() throws Throwable {
+
+    try {
+      assume(true, "true");
+    } catch (AssumptionViolatedException e) {
+      throw new Exception(e);
+    }
+  }
+
+  @Test
+  public void testAssumeFalse() throws Throwable {
+
+    try {
+      assume(false, "false");
+      fail("expected an exception");
+    } catch (AssumptionViolatedException ignored) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testAssumeBoolOptionSetInConf() throws Throwable {
+    Configuration conf = new Configuration(false);
+    conf.set("key", "true");
+    try {
+      assumeBoolOption(conf, "key", false);
+    } catch (AssumptionViolatedException e) {
+      throw new Exception(e);
+    }
+  }
+
+  @Test
+  public void testAssumeBoolOptionUnsetInConf() throws Throwable {
+    Configuration conf = new Configuration(false);
+    try {
+      assumeBoolOption(conf, "key", true);
+    } catch (AssumptionViolatedException e) {
+      throw new Exception(e);
+    }
+  }
+
+
+  @Test
+  public void testAssumeBoolOptionFalseInConf() throws Throwable {
+    Configuration conf = new Configuration(false);
+    conf.set("key", "false");
+    try {
+      assumeBoolOption(conf, "key", true);
+      fail("expected an exception");
+    } catch (AssumptionViolatedException ignored) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testAssumeBoolOptionFalseUnsetInConf() throws Throwable {
+    Configuration conf = new Configuration(false);
+    try {
+      assumeBoolOption(conf, "key", false);
+      fail("expected an exception");
+    } catch (AssumptionViolatedException ignored) {
+      //expected
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
index 0df6047..a525e09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderUtils.java
@@ -17,13 +17,9 @@
 package org.apache.slider.common.tools;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
-import org.apache.slider.tools.TestUtility;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -33,7 +29,6 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -45,26 +40,6 @@ public class TestSliderUtils {
   public TemporaryFolder folder = new TemporaryFolder();
 
   @Test
-  public void testGetMetaInfoStreamFromZip() throws Exception {
-    String zipFileName = TestUtility.createAppPackage(
-        folder,
-        "testpkg",
-        "test.zip",
-        "target/test-classes/org/apache/slider/common/tools/test");
-    Configuration configuration = new Configuration();
-    FileSystem fs = FileSystem.getLocal(configuration);
-    log.info("fs working dir is {}", fs.getWorkingDirectory().toString());
-    SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
-
-    InputStream stream = SliderUtils.getApplicationResourceInputStream(
-        sliderFileSystem.getFileSystem(),
-        new Path(zipFileName),
-        "metainfo.xml");
-    Assert.assertTrue(stream != null);
-    Assert.assertTrue(stream.available() > 0);
-  }
-
-  @Test
   public void testTruncate() {
     Assert.assertEquals(SliderUtils.truncate(null, 5), null);
     Assert.assertEquals(SliderUtils.truncate("323", -1), "323");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java
new file mode 100644
index 0000000..829b897
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.util.Shell;
+import org.apache.slider.utils.YarnMiniClusterTestBase;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+/**
+ * Test windows support.
+ */
+public class TestWindowsSupport extends YarnMiniClusterTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestWindowsSupport.class);
+
+  private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
+      Pattern.compile("^/?[a-zA-Z]:");
+  public static final String WINDOWS_FILE =
+      "C:\\Users\\Administrator\\AppData\\Local\\Temp" +
+      "\\junit3180177850133852404\\testpkg\\appdef_1.zip";
+
+
+  private static boolean hasWindowsDrive(String path) {
+    return HAS_DRIVE_LETTER_SPECIFIER.matcher(path).find();
+  }
+
+  private static int startPositionWithoutWindowsDrive(String path) {
+    if (hasWindowsDrive(path)) {
+      return path.charAt(0) == '/' ? 3 : 2;
+    } else {
+      return 0;
+    }
+  }
+
+  @Test
+  public void testHasWindowsDrive() throws Throwable {
+    assertTrue(hasWindowsDrive(WINDOWS_FILE));
+  }
+
+  @Test
+  public void testStartPosition() throws Throwable {
+    assertEquals(2, startPositionWithoutWindowsDrive(WINDOWS_FILE));
+  }
+
+  @Test
+  public void testPathHandling() throws Throwable {
+    assumeWindows();
+
+    Path path = new Path(WINDOWS_FILE);
+    URI uri = path.toUri();
+    //    assert "file" == uri.scheme
+    assertNull(uri.getAuthority());
+
+    Configuration conf = new Configuration();
+
+    FileSystem localfs = FileSystem.get(uri, conf);
+    assertTrue(localfs instanceof ChecksumFileSystem);
+    try {
+      FileStatus stat = localfs.getFileStatus(path);
+      fail("expected an exception, got " + stat);
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+
+    try {
+      FSDataInputStream appStream = localfs.open(path);
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testExecNonexistentBinary() throws Throwable {
+    assumeWindows();
+    List<String> commands = Arrays.asList("undefined-application", "--version");
+    try {
+      exec(0, commands);
+      fail("expected an exception");
+    } catch (ServiceStateException e) {
+      if (!(e.getCause() instanceof FileNotFoundException)) {
+        throw e;
+      }
+    }
+  }
+  @Test
+  public void testExecNonexistentBinary2() throws Throwable {
+    assumeWindows();
+    assertFalse(doesAppExist(Arrays.asList("undefined-application",
+        "--version")));
+  }
+
+  @Test
+  public void testEmitKillCommand() throws Throwable {
+
+    int result = killJavaProcesses("regionserver", 9);
+    // we know the exit code if there is no supported kill operation
+    assertTrue(getKillSupported() || result == -1);
+  }
+
+  @Test
+  public void testHadoopHomeDefined() throws Throwable {
+    assumeWindows();
+    String hadoopHome = Shell.getHadoopHome();
+    LOG.info("HADOOP_HOME={}", hadoopHome);
+  }
+
+  @Test
+  public void testHasWinutils() throws Throwable {
+    assumeWindows();
+    SliderUtils.maybeVerifyWinUtilsValid();
+  }
+
+  @Test
+  public void testExecWinutils() throws Throwable {
+    assumeWindows();
+    String winUtilsPath = Shell.getWinUtilsPath();
+    assertTrue(SliderUtils.isSet(winUtilsPath));
+    File winUtils = new File(winUtilsPath);
+    LOG.debug("Winutils is at {}", winUtils);
+
+    exec(0, Arrays.asList(winUtilsPath, "systeminfo"));
+  }
+
+  @Test
+  public void testPath() throws Throwable {
+    String path = extractPath();
+    LOG.info("Path value = {}", path);
+  }
+
+  @Test
+  public void testFindJavac() throws Throwable {
+    String name = Shell.WINDOWS ? "javac.exe" : "javac";
+    assertNotNull(locateExecutable(name));
+  }
+
+  @Test
+  public void testHadoopDLL() throws Throwable {
+    assumeWindows();
+    // split the path
+    File exepath = locateExecutable("HADOOP.DLL");
+    assertNotNull(exepath);
+    LOG.info("Hadoop DLL at: {}", exepath);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java
new file mode 100644
index 0000000..186123d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.core.zk.ZKIntegration;
+import org.apache.slider.utils.KeysForTests;
+import org.apache.slider.utils.YarnZKMiniClusterTestBase;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Test ZK integration.
+ */
+public class TestZKIntegration extends YarnZKMiniClusterTestBase implements
+    KeysForTests {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestZKIntegration.class);
+
+  public static final String USER = KeysForTests.USERNAME;
+  public static final int CONNECT_TIMEOUT = 5000;
+  private ZKIntegration zki;
+
+  @Before
+  public void createCluster() {
+    Configuration conf = getConfiguration();
+    String name = methodName.getMethodName();
+    File zkdir = new File("target/zk/${name}");
+    FileUtil.fullyDelete(zkdir);
+    conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkdir
+        .getAbsolutePath());
+    createMicroZKCluster("-"+ name, conf);
+  }
+
+  @After
+  public void closeZKI() throws IOException {
+    if (zki != null) {
+      zki.close();
+      zki = null;
+    }
+  }
+
+  public ZKIntegration initZKI() throws IOException, InterruptedException {
+    zki = createZKIntegrationInstance(
+        getZKBinding(), methodName.getMethodName(), true, false,
+        CONNECT_TIMEOUT);
+    return zki;
+  }
+
+  @Test
+  public void testListUserClustersWithoutAnyClusters() throws Throwable {
+    assertHasZKCluster();
+    initZKI();
+    String userPath = ZKIntegration.mkSliderUserPath(USER);
+    List<String> clusters = this.zki.getClusters();
+    assertTrue(SliderUtils.isEmpty(clusters));
+  }
+
+  @Test
+  public void testListUserClustersWithOneCluster() throws Throwable {
+    assertHasZKCluster();
+
+    initZKI();
+    String userPath = ZKIntegration.mkSliderUserPath(USER);
+    String fullPath = zki.createPath(userPath, "/cluster-",
+                                     ZooDefs.Ids.OPEN_ACL_UNSAFE,
+                                     CreateMode.EPHEMERAL_SEQUENTIAL);
+    LOG.info("Ephemeral path {}", fullPath);
+    List<String> clusters = zki.getClusters();
+    assertEquals(1, clusters.size());
+    assertTrue(fullPath.endsWith(clusters.get(0)));
+  }
+
+  @Test
+  public void testListUserClustersWithTwoCluster() throws Throwable {
+    initZKI();
+    String userPath = ZKIntegration.mkSliderUserPath(USER);
+    String c1 = createEphemeralChild(zki, userPath);
+    LOG.info("Ephemeral path $c1");
+    String c2 = createEphemeralChild(zki, userPath);
+    LOG.info("Ephemeral path $c2");
+    List<String> clusters = zki.getClusters();
+    assertEquals(2, clusters.size());
+    assertTrue((c1.endsWith(clusters.get(0)) && c2.endsWith(clusters.get(1))) ||
+        (c1.endsWith(clusters.get(1)) && c2.endsWith(clusters.get(0))));
+  }
+
+  @Test
+  public void testCreateAndDeleteDefaultZKPath() throws Throwable {
+    MockSliderClient client = new MockSliderClient();
+
+    String path = client.createZookeeperNodeInner("cl1", true);
+    zki = client.getLastZKIntegration();
+
+    String zkPath = ZKIntegration.mkClusterPath(USER, "cl1");
+    assertEquals("zkPath must be as expected", zkPath,
+        "/services/slider/users/" + USER + "/cl1");
+    assertEquals(path, zkPath);
+    assertNull("ZKIntegration should be null.", zki);
+    zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false,
+        CONNECT_TIMEOUT);
+    assertFalse(zki.exists(zkPath));
+
+    path = client.createZookeeperNodeInner("cl1", false);
+    zki = client.getLastZKIntegration();
+    assertNotNull(zki);
+    assertEquals("zkPath must be as expected", zkPath,
+        "/services/slider/users/" + USER + "/cl1");
+    assertEquals(path, zkPath);
+    assertTrue(zki.exists(zkPath));
+    zki.createPath(zkPath, "/cn", ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode
+        .PERSISTENT);
+    assertTrue(zki.exists(zkPath + "/cn"));
+    client.deleteZookeeperNode("cl1");
+    assertFalse(zki.exists(zkPath));
+  }
+
+  public static String createEphemeralChild(ZKIntegration zki, String userPath)
+      throws KeeperException, InterruptedException {
+    return zki.createPath(userPath, "/cluster-",
+                          ZooDefs.Ids.OPEN_ACL_UNSAFE,
+                          CreateMode.EPHEMERAL_SEQUENTIAL);
+  }
+
+  /**
+   * Test slider client that overriddes ZK client.
+   */
+  public class MockSliderClient extends SliderClient {
+    private ZKIntegration zki;
+
+    @Override
+    public String getUsername() {
+      return USER;
+    }
+
+    @Override
+    protected ZKIntegration getZkClient(String clusterName, String user) {
+      try {
+        zki = createZKIntegrationInstance(getZKBinding(), clusterName, true,
+            false, CONNECT_TIMEOUT);
+      } catch (Exception e) {
+        fail("creating ZKIntergration threw an exception");
+      }
+      return zki;
+    }
+
+    @Override
+    public Configuration getConfig() {
+      return new Configuration();
+    }
+
+    public ZKIntegration getLastZKIntegration() {
+      return zki;
+    }
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
new file mode 100644
index 0000000..f13fbcc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
import org.apache.slider.api.resource.Application;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
+
+/**
+ * Names of the example configs.
+ */
+public final class ExampleConfResources {
+
+  public static final String APP_JSON = "app.json";
+  public static final String APP_RES = "app-resolved.json";
+  public static final String OVERRIDE_JSON = "app-override.json";
+  public static final String OVERRIDE_RES = "app-override-resolved.json";
+
+  public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";
+
+
+  private static final String[] ALL_EXAMPLES = {APP_JSON, APP_RES,
+      OVERRIDE_JSON, OVERRIDE_RES};
+
+  public static final List<String> ALL_EXAMPLE_RESOURCES = new ArrayList<>();
+  static {
+    for (String example : ALL_EXAMPLES) {
+      ALL_EXAMPLE_RESOURCES.add(PACKAGE + example);
+    }
+  }
+
+  private ExampleConfResources() {
+  }
+
+  static Application loadResource(String name) throws IOException {
+    return JSON_SER_DESER.fromResource(PACKAGE + name);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
new file mode 100644
index 0000000..48b0736
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.tools.SliderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.apache.slider.utils.SliderTestUtils.JSON_SER_DESER;
+
+/**
+ * Test loading example resources.
+ */
+@RunWith(value = Parameterized.class)
+public class TestConfTreeLoadExamples extends Assert {
+  private String resource;
+
+  public TestConfTreeLoadExamples(String resource) {
+    this.resource = resource;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<String[]> filenames() {
+    String[][] stringArray = new String[ExampleConfResources
+        .ALL_EXAMPLE_RESOURCES.size()][1];
+    int i = 0;
+    for (String s : ExampleConfResources.ALL_EXAMPLE_RESOURCES) {
+      stringArray[i++][0] = s;
+    }
+    return Arrays.asList(stringArray);
+  }
+
+  @Test
+  public void testLoadResource() throws Throwable {
+    try {
+      Application application = JSON_SER_DESER.fromResource(resource);
+      SliderUtils.resolve(application);
+    } catch (Exception e) {
+      throw new Exception("exception loading " + resource + ":" + e.toString());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
new file mode 100644
index 0000000..285ddfa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfigurationResolve.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Configuration;
+import org.apache.slider.common.tools.SliderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.slider.api.InternalKeys.CHAOS_MONKEY_INTERVAL;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES;
+import static org.apache.slider.core.conf.ExampleConfResources.APP_JSON;
+import static org.apache.slider.core.conf.ExampleConfResources.OVERRIDE_JSON;
+
+/**
+ * Test global configuration resolution.
+ */
/**
 * Test global configuration resolution.
 */
public class TestConfigurationResolve extends Assert {
  protected static final Logger LOG =
      LoggerFactory.getLogger(TestConfigurationResolve.class);

  /**
   * Load the override example and check component configurations before
   * and after resolution: before, each component holds only its own
   * properties; after SliderUtils.resolve(), global properties are merged
   * into each component with component-level values taking precedence.
   */
  @Test
  public void testOverride() throws Throwable {

    Application orig = ExampleConfResources.loadResource(OVERRIDE_JSON);

    // pre-resolution: globals live only at the application level
    Configuration global = orig.getConfiguration();
    assertEquals("a", global.getProperty("g1"));
    assertEquals("b", global.getProperty("g2"));

    // "simple" declares no properties of its own yet
    Configuration simple = orig.getComponent("simple").getConfiguration();
    assertEquals(0, simple.getProperties().size());

    // "master" overrides g1 locally
    Configuration master = orig.getComponent("master").getConfiguration();
    assertEquals("m", master.getProperty("name"));
    assertEquals("overridden", master.getProperty("g1"));

    Configuration worker = orig.getComponent("worker").getConfiguration();
    LOG.info("worker = {}", worker);
    assertEquals(3, worker.getProperties().size());

    assertEquals("worker", worker.getProperty("name"));
    assertEquals("overridden-by-worker", worker.getProperty("g1"));
    // g2 is not yet inherited from the global configuration
    assertNull(worker.getProperty("g2"));
    assertEquals("1000", worker.getProperty("timeout"));

    // here is the resolution
    SliderUtils.resolve(orig);

    // post-resolution: application-level globals are unchanged
    global = orig.getConfiguration();
    LOG.info("global = {}", global);
    assertEquals("a", global.getProperty("g1"));
    assertEquals("b", global.getProperty("g2"));

    // "simple" now inherits both globals
    simple = orig.getComponent("simple").getConfiguration();
    assertEquals(2, simple.getProperties().size());
    assertEquals("a", simple.getProperty("g1"));
    assertEquals("b", simple.getProperty("g2"));


    // "master" keeps its g1 override and gains the global g2
    master = orig.getComponent("master").getConfiguration();
    LOG.info("master = {}", master);
    assertEquals(3, master.getProperties().size());
    assertEquals("m", master.getProperty("name"));
    assertEquals("overridden", master.getProperty("g1"));
    assertEquals("b", master.getProperty("g2"));

    // "worker" likewise: local values win, missing globals are filled in
    worker = orig.getComponent("worker").getConfiguration();
    LOG.info("worker = {}", worker);
    assertEquals(4, worker.getProperties().size());

    assertEquals("worker", worker.getProperty("name"));
    assertEquals("overridden-by-worker", worker.getProperty("g1"));
    assertEquals("b", worker.getProperty("g2"));
    assertEquals("1000", worker.getProperty("timeout"));

  }

  /**
   * Verify the chaos monkey interval from the example app resolves to 60
   * seconds, both as the direct ".seconds" property and through
   * SliderUtils.getTimeRange() with day/hour/minute defaults.
   */
  @Test
  public void testTimeIntervalLoading() throws Throwable {

    Application orig = ExampleConfResources.loadResource(APP_JSON);

    Configuration conf = orig.getConfiguration();
    long s = conf.getPropertyLong(
        CHAOS_MONKEY_INTERVAL + SliderUtils.SECONDS,
        0);
    assertEquals(60, s);
    long monkeyInterval = SliderUtils.getTimeRange(conf,
        CHAOS_MONKEY_INTERVAL,
        DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES,
        0);
    assertEquals(60L, monkeyInterval);
  }
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java
new file mode 100644
index 0000000..fd794ea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.other;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.slider.utils.YarnMiniClusterTestBase;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This test class exists to look at permissions of the filesystem, especially
+ * that created by Mini YARN clusters. On some windows jenkins machines,
+ * YARN actions were failing as the directories had the wrong permissions
+ * (i.e. too lax)
+ */
/**
 * This test class exists to look at permissions of the filesystem, especially
 * that created by Mini YARN clusters. On some windows jenkins machines,
 * YARN actions were failing as the directories had the wrong permissions
 * (i.e. too lax)
 */
public class TestFilesystemPermissions extends YarnMiniClusterTestBase {

  private static final Logger LOG = LoggerFactory.getLogger(
      TestFilesystemPermissions.class);

  // directories registered by testDir(); removed after each test
  private List<File> filesToDelete = new ArrayList<>();

  /** Recursively delete every directory registered during the test. */
  @After
  public void deleteFiles() {
    for (File f : filesToDelete) {
      FileUtil.fullyDelete(f, true);
    }
  }

  /** A freshly created dir must be readable, writable and executable. */
  @Test
  public void testJavaFSOperations() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdir();
    assertTrue(subdir.isDirectory());
    assertTrue(FileUtil.canRead(subdir));
    assertTrue(FileUtil.canWrite(subdir));
    assertTrue(FileUtil.canExecute(subdir));
  }

  /** DiskChecker must accept a dir created with mkdir(). */
  @Test
  public void testDiskCheckerOperations() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdir();
    DiskChecker checker = new DiskChecker();
    checker.checkDir(subdir);
  }

  /** DiskChecker must accept a dir created with mkdirs(). */
  @Test
  public void testDiskCheckerMkdir() throws Throwable {
    assertNativeLibrariesPresent();
    File subdir = testDir();
    subdir.mkdirs();
    DiskChecker checker = new DiskChecker();
    checker.checkDir(subdir);
  }

  /**
   * Get a test dir for this method; one that will be deleted on teardown.
   * @return a filename unique to this test method
   */
  File testDir() {
    File parent = new File("target/testfspermissions");
    parent.mkdir();
    File testdir = new File(parent, methodName.getMethodName());
    // register for cleanup in deleteFiles()
    filesToDelete.add(testdir);
    return testdir;
  }


  /** Building the permissions map must not throw for a local dir. */
  @Test
  public void testPermsMap() throws Throwable {
    File dir = testDir();
    String diruri = dir.toURI().toString();
    FileContext lfs = createLocalFS(dir, getConfiguration());
    getLocalDirsPathPermissionsMap(lfs, diruri);
  }

  /** A local dir can be initialized and then passes the perms check. */
  @Test
  public void testInitLocaldir() throws Throwable {
    File dir = testDir();
    String diruri = dir.toURI().toString();
    FileContext lfs = createLocalFS(dir, getConfiguration());
    initializeLocalDir(lfs, diruri);
    List<String> localDirs = getInitializedLocalDirs(lfs, Arrays.asList(
        diruri));
    assertEquals(1, localDirs.size());
  }


  /**
   * Bring up a minicluster and verify every NM local dir has the
   * permissions the NM's localization service expects.
   */
  @Test
  public void testValidateMiniclusterPerms() throws Throwable {
    int numLocal = 1;
    String cluster = createMiniCluster("", getConfiguration(), 1, numLocal, 1,
        false);
    File workDir = getMiniCluster().getTestWorkDir();
    List<File> localdirs = new ArrayList<>();
    // NOTE(review): local dirs are identified by a "-local" name suffix —
    // assumed stable across MiniYARNCluster versions; confirm if it breaks.
    for (File file : workDir.listFiles()) {
      if (file.isDirectory() && file.getAbsolutePath().contains("-local")) {
        // local dir
        localdirs.add(file);
      }
    }
    assertEquals(numLocal, localdirs.size());
    FileContext lfs = createLocalFS(workDir, getConfiguration());
    for (File file : localdirs) {
      checkLocalDir(lfs, file.toURI().toString());
    }
  }

  /** Create a FileContext rooted at the given local directory. */
  FileContext createLocalFS(File dir, Configuration conf)
      throws UnsupportedFileSystemException {
    return FileContext.getFileContext(dir.toURI(), conf);
  }

  /**
   * Extracted from ResourceLocalizationService.
   * Maps usercache/filecache to the default dir permission (umask-applied)
   * and nmPrivate to the NM's private permission.
   * @param lfs file context supplying the umask
   * @param localDir base local directory
   * @return perms map
   * @see ResourceLocalizationService
   */
  private Map<Path, FsPermission> getLocalDirsPathPermissionsMap(
      FileContext lfs,
      String localDir) {
    Map<Path, FsPermission> localDirPathFsPermissionsMap = new HashMap<>();

    FsPermission defaultPermission =
        FsPermission.getDirDefault().applyUMask(lfs.getUMask());
    FsPermission nmPrivatePermission =
        ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());

    Path userDir = new Path(localDir, ContainerLocalizer.USERCACHE);
    Path fileDir = new Path(localDir, ContainerLocalizer.FILECACHE);
    Path sysDir = new Path(
        localDir,
        ResourceLocalizationService.NM_PRIVATE_DIR);

    localDirPathFsPermissionsMap.put(userDir, defaultPermission);
    localDirPathFsPermissionsMap.put(fileDir, defaultPermission);
    localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission);
    return localDirPathFsPermissionsMap;
  }

  /**
   * Verify each expected subdir exists with exactly the expected
   * permission; throws YarnRuntimeException on any mismatch
   * (and FileNotFoundException from getFileStatus if a dir is absent).
   * @return true if all checks pass
   */
  private boolean checkLocalDir(FileContext lfs, String localDir)
      throws IOException {

    Map<Path, FsPermission> pathPermissionMap =
        getLocalDirsPathPermissionsMap(lfs, localDir);

    for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) {
      FileStatus status;
      status = lfs.getFileStatus(entry.getKey());

      if (!status.getPermission().equals(entry.getValue())) {
        String msg =
            "Permissions incorrectly set for dir " + entry.getKey() +
                ", should be " + entry.getValue() + ", actual value = " +
                status.getPermission();
        throw new YarnRuntimeException(msg);
      }
    }
    return true;
  }


  /**
   * Create any missing expected subdirs and force their permissions to
   * the expected values (mkdir honors the umask, so a follow-up
   * setPermission may be needed).
   */
  private void initializeLocalDir(FileContext lfs, String localDir)
      throws IOException {

    Map<Path, FsPermission> pathPermissionMap =
        getLocalDirsPathPermissionsMap(lfs, localDir);
    for (Map.Entry<Path, FsPermission> entry : pathPermissionMap.entrySet()) {
      FileStatus status;
      try {
        status = lfs.getFileStatus(entry.getKey());
      } catch (FileNotFoundException fs) {
        // absent: will be created below
        status = null;
      }

      if (status == null) {
        lfs.mkdir(entry.getKey(), entry.getValue(), true);
        status = lfs.getFileStatus(entry.getKey());
      }
      FsPermission perms = status.getPermission();
      if (!perms.equals(entry.getValue())) {
        lfs.setPermission(entry.getKey(), entry.getValue());
      }
    }
  }

  /**
   * Check each dir; any that fail are initialized and re-checked
   * (the second check will throw if initialization did not fix it).
   * @return the input list unchanged
   */
  synchronized private List<String> getInitializedLocalDirs(FileContext lfs,
      List<String> dirs) throws IOException {
    List<String> checkFailedDirs = new ArrayList<String>();
    for (String dir : dirs) {
      try {
        checkLocalDir(lfs, dir);
      } catch (YarnRuntimeException e) {
        checkFailedDirs.add(dir);
      }
    }
    for (String dir : checkFailedDirs) {
      LOG.info("Attempting to initialize " + dir);
      initializeLocalDir(lfs, dir);
      checkLocalDir(lfs, dir);
    }
    return dirs;
  }


  /**
   * Recursively create a dir and its parents with the given permission,
   * forcing the permission where the umask would weaken it.
   * NOTE(review): appears unused within this class — confirm before
   * removing.
   */
  private void createDir(FileContext localFs, Path dir, FsPermission perm)
  throws IOException {
    if (dir == null) {
      return;
    }
    try {
      localFs.getFileStatus(dir);
    } catch (FileNotFoundException e) {
      createDir(localFs, dir.getParent(), perm);
      localFs.mkdir(dir, perm, false);
      if (!perm.equals(perm.applyUMask(localFs.getUMask()))) {
        localFs.setPermission(dir, perm);
      }
    }
  }
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java
new file mode 100644
index 0000000..704c71e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.other;
+
+import org.apache.slider.utils.SliderTestUtils;
+import org.apache.slider.utils.TestUtility;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+/**
+ * This test exists to diagnose local FS permissions.
+ */
+public class TestLocalDirStatus extends SliderTestUtils {
+
+
+  public static final int SIZE = 0x200000;
+
+  @Test
+  public void testTempDir() throws Throwable {
+    File tmpf = null;
+    try {
+      tmpf = File.createTempFile("testl", ".bin");
+      createAndReadFile(tmpf, SIZE);
+      tmpf.delete();
+      assertFalse(tmpf.exists());
+    } finally {
+      if (tmpf != null) {
+        tmpf.delete();
+      }
+    }
+  }
+
+  @Test
+  public void testTargetDir() throws Throwable {
+    File target = target();
+    File tmpf = null;
+    try {
+      tmpf = File.createTempFile("testl", ".bin", target);
+      createAndReadFile(tmpf, SIZE);
+      tmpf.delete();
+      assertFalse(tmpf.exists());
+    } finally {
+      if (tmpf != null) {
+        tmpf.delete();
+      }
+
+    }
+  }
+
+  public File target() {
+    File target = new File("target").getAbsoluteFile();
+    assertTrue(target.exists());
+    return target;
+  }
+
+  @Test
+  public void testRenameInTargetDir() throws Throwable {
+    File target = target();
+    File tmpf = null;
+    File dst= null;
+    try {
+      tmpf = File.createTempFile("testl", ".bin", target);
+      dst = File.createTempFile("test-dest", ".bin", target);
+      createRenameAndReadFile(tmpf, dst, SIZE);
+      assertFalse(tmpf.exists());
+      dst.delete();
+    } finally {
+      if (tmpf != null) {
+        tmpf.delete();
+      }
+      if (dst != null) {
+        dst.delete();
+      }
+    }
+  }
+
+  @Test
+  public void testRenameInTmpDir() throws Throwable {
+    File tmpf = null;
+    File dst= null;
+    try {
+      tmpf = File.createTempFile("testl", ".bin");
+      dst = File.createTempFile("test-dest", ".bin");
+      createRenameAndReadFile(tmpf, dst, SIZE);
+      assertFalse(tmpf.exists());
+      dst.delete();
+    } finally {
+      if (tmpf != null) {
+        tmpf.delete();
+      }
+      if (dst != null) {
+        dst.delete();
+      }
+    }
+  }
+
+  protected void createAndReadFile(File path, int len) throws IOException {
+    byte[] dataset = TestUtility.dataset(len, 32, 128);
+    writeFile(path, dataset);
+    assertTrue(path.exists());
+    assertEquals(len, path.length());
+    byte[] persisted = readFile(path);
+    TestUtility.compareByteArrays(dataset, persisted, len);
+  }
+
+  protected void createRenameAndReadFile(File src, File dst, int len)
+      throws IOException {
+    byte[] dataset = TestUtility.dataset(len, 32, 128);
+    writeFile(src, dataset);
+    assertTrue(src.exists());
+    assertEquals(len, src.length());
+    dst.delete();
+    assertFalse(dst.exists());
+    assertTrue(src.renameTo(dst));
+    assertEquals(len, dst.length());
+    byte[] persisted = readFile(dst);
+    TestUtility.compareByteArrays(dataset, persisted, len);
+  }
+
+  protected void writeFile(File path, byte[] dataset)
+      throws IOException {
+    FileOutputStream out = new FileOutputStream(path);
+    try {
+      out.write(dataset);
+      out.flush();
+    } finally {
+      out.close();
+    }
+  }
+
+  protected byte[] readFile(File path) throws IOException {
+    assertTrue(path.getAbsoluteFile().exists());
+    assertTrue(path.getAbsoluteFile().isFile());
+    int len = (int)path.length();
+    byte[] dataset = new byte[len];
+    FileInputStream ins = new FileInputStream(path);
+    try {
+      ins.read(dataset);
+    } finally {
+      ins.close();
+    }
+    return dataset;
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
new file mode 100644
index 0000000..11abdfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers;
+
+import org.apache.slider.providers.docker.DockerKeys;
+import org.apache.slider.providers.docker.DockerProviderFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test provider factories.
+ */
+public class TestProviderFactory {
+
+  /** Create the provider factory for the docker provider name. */
+  private SliderProviderFactory createDockerFactory() {
+    return SliderProviderFactory
+        .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
+  }
+
+  /**
+   * The docker provider name must resolve to a DockerProviderFactory.
+   * (Renamed from testLoadAgentProvider: the test loads the docker
+   * provider, not an agent provider, so the old name was stale.)
+   */
+  @Test
+  public void testLoadDockerProvider() throws Throwable {
+    assertTrue(createDockerFactory() instanceof DockerProviderFactory);
+  }
+
+  /** A resolved factory must be able to supply a client provider. */
+  @Test
+  public void testCreateClientProvider() throws Throwable {
+    assertNotNull(createDockerFactory().createClientProvider());
+  }
+
+  /** A resolved factory must be able to supply a server provider. */
+  @Test
+  public void testCreateProviderByClassname() throws Throwable {
+    SliderProviderFactory factory = createDockerFactory();
+    assertNotNull(factory.createServerProvider());
+    assertTrue(factory instanceof DockerProviderFactory);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java
new file mode 100644
index 0000000..100518e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.registry;
+
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+/**
+ * Test config set name validation.
+ */
+public class TestConfigSetNaming {
+
+  /** Asserts that {@code name} passes config set name validation. */
+  void assertValid(String name) {
+    PublishedConfigSet.validateName(name);
+  }
+
+  /**
+   * Asserts that {@code name} is rejected by config set name validation
+   * with an IllegalArgumentException; any other outcome fails the test.
+   */
+  void assertInvalid(String name) {
+    try {
+      PublishedConfigSet.validateName(name);
+      Assert.fail("Invalid name was unexpectedly parsed: " + name);
+    } catch (IllegalArgumentException expected) {
+      // expected
+    }
+  }
+
+  /** All lower-case ASCII letters are valid. */
+  @Test
+  public void testLowerCase() throws Throwable {
+    assertValid("abcdefghijklmnopqrstuvwxyz");
+  }
+
+  /** Upper-case letters are rejected. */
+  @Test
+  public void testUpperCaseInvalid() throws Throwable {
+    assertInvalid("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
+  }
+
+  /** Digits are valid. */
+  @Test
+  public void testNumbers() throws Throwable {
+    assertValid("01234567890");
+  }
+
+  /** '-', '_' and '+' are valid when combined with a letter. */
+  @Test
+  public void testChars() throws Throwable {
+    assertValid("a-_+");
+  }
+
+  /**
+   * Names that must be rejected: empty/whitespace, path separators,
+   * quotes, control and non-ASCII characters, dots, and — per the
+   * cases below — punctuation characters standing entirely alone
+   * (contrast with testChars, where they follow a letter).
+   */
+  @Test
+  public void testInvalids() throws Throwable {
+    for (String s : Arrays.asList(
+        "",
+        " ",
+        "*",
+        "a/b",
+        "b\\a",
+        "\"",
+        "'",
+        "\u0000",
+        "\u0f00",
+        "key.value",
+        "-",
+        "+",
+        "_",
+        "?")) {
+      assertInvalid(s);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java
new file mode 100644
index 0000000..5fcfd89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.registry;
+
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.slider.core.registry.SliderRegistryUtils;
+import org.apache.slider.utils.SliderTestUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test registry paths.
+ */
+public class TestRegistryPaths {
+
+  /**
+   * A Kerberos principal's realm must be stripped when mapping the user
+   * to a registry home path. If the registry module in this Hadoop
+   * build does not yet filter principals, the test is skipped rather
+   * than failed — hence the deliberate catch of AssertionError.
+   */
+  @Test
+  public void testHomedirKerberos() throws Throwable {
+    String home = RegistryUtils.homePathForUser("hbase@HADOOP.APACHE.ORG");
+    try {
+      assertEquals("/users/hbase", home);
+    } catch (AssertionError e) {
+      SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
+          "module");
+    }
+  }
+
+  /**
+   * As testHomedirKerberos, but with a host-qualified principal
+   * (service/host@REALM): both host and realm must be stripped.
+   */
+  @Test
+  public void testHomedirKerberosHost() throws Throwable {
+    String home = RegistryUtils.homePathForUser("hbase/localhost@HADOOP" +
+        ".APACHE.ORG");
+    try {
+      assertEquals("/users/hbase", home);
+    } catch (AssertionError e) {
+      SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
+          "module");
+    }
+  }
+
+  /** The registry path for an instance must end with "/<instance>". */
+  @Test
+  public void testRegistryPathForInstance() throws Throwable {
+    String path = SliderRegistryUtils.registryPathForInstance("instance");
+    assertTrue(path.endsWith("/instance"));
+  }
+
+  /**
+   * "~" resolution: a leading "~" expands to the current user's home
+   * path (with or without trailing content), while "~<text>" with no
+   * separator is passed through unresolved.
+   */
+  @Test
+  public void testPathResolution() throws Throwable {
+    String home = RegistryUtils.homePathForCurrentUser();
+    assertEquals(home, SliderRegistryUtils.resolvePath("~"));
+    assertEquals(home +"/", SliderRegistryUtils.resolvePath("~/"));
+    assertEquals(home +"/something", SliderRegistryUtils.resolvePath(
+        "~/something"));
+    assertEquals("~unresolved", SliderRegistryUtils.resolvePath(
+        "~unresolved"));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java
new file mode 100644
index 0000000..68d55aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.actions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.services.workflow.ServiceThreadFactory;
+import org.apache.slider.server.services.workflow.WorkflowExecutorService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test AM actions.
+ */
+public class TestActions {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(TestActions.class);
+
+  // Queue service under test; created fresh for every test case.
+  private QueueService queues;
+  // Executor used to run queue-draining work off the test thread.
+  private WorkflowExecutorService<ExecutorService> executorService;
+
+
+  /** Create and start the queue service and its executor. */
+  @Before
+  public void createService() {
+    queues = new QueueService();
+
+    Configuration conf = new Configuration();
+    queues.init(conf);
+
+    queues.start();
+
+    executorService = new WorkflowExecutorService<>("AmExecutor",
+        Executors.newCachedThreadPool(
+            new ServiceThreadFactory("AmExecutor", true)));
+
+    executorService.init(conf);
+    executorService.start();
+  }
+
+  /** Stop both services; ServiceOperations.stop is null/state safe. */
+  @After
+  public void destroyService() {
+    ServiceOperations.stop(executorService);
+    ServiceOperations.stop(queues);
+  }
+
+  /**
+   * Sanity check of service lifecycle.
+   * NOTE(review): createService() already started the queues, so this
+   * is a second start() call — presumably verifying it is harmless on
+   * an already-started service; TODO confirm that is the intent.
+   */
+  @Test
+  public void testBasicService() throws Throwable {
+    queues.start();
+  }
+
+  /**
+   * Delay accounting and ordering of delayed actions. The wide
+   * 800–1800ms window tolerates scheduler/clock jitter around the
+   * nominal 1000ms delay.
+   */
+  @Test
+  public void testDelayLogic() throws Throwable {
+    ActionNoteExecuted action = new ActionNoteExecuted("", 1000);
+    // NOTE(review): 'now' is never used below — candidate for removal
+    long now = System.currentTimeMillis();
+
+    long delay = action.getDelay(TimeUnit.MILLISECONDS);
+    assertTrue(delay >= 800);
+    assertTrue(delay <= 1800);
+
+    // a later-scheduled action must compare after an earlier one,
+    // and compareTo must be reflexive-zero
+    ActionNoteExecuted a2 = new ActionNoteExecuted("a2", 10000);
+    assertTrue(action.compareTo(a2) < 0);
+    assertTrue(a2.compareTo(action) > 0);
+    assertEquals(0, action.compareTo(action));
+
+  }
+
+  /**
+   * A stop action with a 1s delay must terminate the queue run() loop
+   * roughly on schedule (0.5–1.5s window for timing jitter).
+   * NOTE(review): assertEquals(take, stopAction) has actual before
+   * expected — harmless for the check itself, but failure messages
+   * would read backwards.
+   */
+  @Test
+  public void testActionDelayedExecutorTermination() throws Throwable {
+    long start = System.currentTimeMillis();
+
+    ActionStopQueue stopAction = new ActionStopQueue(1000);
+    queues.scheduledActions.add(stopAction);
+    queues.run();
+    AsyncAction take = queues.actionQueue.take();
+    assertEquals(take, stopAction);
+    long stop = System.currentTimeMillis();
+    assertTrue(stop - start > 500);
+    assertTrue(stop - start < 1500);
+  }
+
+  /**
+   * Zero-delay actions placed directly on the immediate queue must be
+   * executed by a QueueExecutor, leaving the queue empty.
+   */
+  @Test
+  public void testImmediateQueue() throws Throwable {
+    ActionNoteExecuted noteExecuted = new ActionNoteExecuted("executed", 0);
+    queues.put(noteExecuted);
+    queues.put(new ActionStopQueue(0));
+    QueueExecutor ex = new QueueExecutor(queues);
+    ex.run();
+    assertTrue(queues.actionQueue.isEmpty());
+    assertTrue(noteExecuted.executed.get());
+  }
+
+  /**
+   * Sorting a list of actions must order them by their scheduled delay
+   * (500ms, 800ms, 1500ms), regardless of insertion order.
+   */
+  @Test
+  public void testActionOrdering() throws Throwable {
+
+    ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+    ActionStopQueue stop = new ActionStopQueue(1500);
+    ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
+
+    List<AsyncAction> actions = Arrays.asList(note1, stop, note2);
+    Collections.sort(actions);
+    assertEquals(actions.get(0), note1);
+    assertEquals(actions.get(1), note2);
+    assertEquals(actions.get(2), stop);
+  }
+
+  /**
+   * Scheduling actions with increasing delays and running the queues to
+   * completion must execute the non-stop actions in delay order.
+   */
+  @Test
+  public void testDelayedQueueWithReschedule() throws Throwable {
+
+    ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+    ActionStopQueue stop = new ActionStopQueue(1500);
+    ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
+
+    assertTrue(note2.compareTo(stop) < 0);
+    assertTrue(note1.getNanos() < note2.getNanos());
+    assertTrue(note2.getNanos() < stop.getNanos());
+    queues.schedule(note1);
+    queues.schedule(note2);
+    queues.schedule(stop);
+    // async to sync expected to run in order
+    runQueuesToCompletion();
+    assertTrue(note1.executed.get());
+    assertTrue(note2.executed.get());
+  }
+
+  /**
+   * Drain the scheduled queue into the action queue (run() returns when
+   * it hits a stop action), execute everything, then flush remaining
+   * stop actions so the queue ends empty.
+   */
+  public void runQueuesToCompletion() {
+    queues.run();
+    assertTrue(queues.scheduledActions.isEmpty());
+    assertFalse(queues.actionQueue.isEmpty());
+    QueueExecutor ex = new QueueExecutor(queues);
+    ex.run();
+    // flush all stop commands from the queue
+    queues.flushActionQueue(ActionStopQueue.class);
+
+    assertTrue(queues.actionQueue.isEmpty());
+  }
+
+  /**
+   * A renewing action wrapping a 500ms action must fire exactly once
+   * before a 4s stop action drains the queues, and must reschedule
+   * itself afterwards. NOTE(review): timing-sensitive — may be flaky
+   * on a heavily loaded host.
+   */
+  @Test
+  public void testRenewedActionFiresOnceAtLeast() throws Throwable {
+    ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+    RenewingAction renewer = new RenewingAction(
+        note1,
+        500,
+        100,
+        TimeUnit.MILLISECONDS,
+        3);
+    queues.schedule(renewer);
+    ActionStopQueue stop = new ActionStopQueue(4, TimeUnit.SECONDS);
+    queues.schedule(stop);
+    // this runs all the delayed actions FIRST, so can't be used
+    // to play tricks of renewing actions ahead of the stop action
+    runQueuesToCompletion();
+    assertEquals(1, renewer.executionCount.intValue());
+    assertEquals(1, note1.executionCount.intValue());
+    // assert the renewed item is back in
+    assertTrue(queues.scheduledActions.contains(renewer));
+  }
+
+
+  /**
+   * A renewing action registered by name must be removable by that
+   * name, after which the queue service must stop cleanly.
+   */
+  @Test
+  public void testRenewingActionOperations() throws Throwable {
+    ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+    RenewingAction renewer = new RenewingAction(
+        note1,
+        100,
+        100,
+        TimeUnit.MILLISECONDS,
+        3);
+    queues.renewing("note", renewer);
+    assertTrue(queues.removeRenewingAction("note"));
+    queues.stop();
+    assertTrue(queues.waitForServiceToStop(10000));
+  }
+
+  /**
+   * Test action: records that (and how often, and when) it was
+   * executed, for assertion by the tests above.
+   */
+  public class ActionNoteExecuted extends AsyncAction {
+    // set true on first execution
+    private final AtomicBoolean executed = new AtomicBoolean(false);
+    // System.nanoTime() of the most recent execution
+    private final AtomicLong executionTimeNanos = new AtomicLong();
+    // total number of executions (renewing actions may fire repeatedly)
+    private final AtomicLong executionCount = new AtomicLong();
+
+    public ActionNoteExecuted(String text, int delay) {
+      super(text, delay);
+    }
+
+    @Override
+    public void execute(
+        SliderAppMaster appMaster,
+        QueueAccess queueService,
+        AppState appState) throws Exception {
+      LOG.info("Executing {}", name);
+      executed.set(true);
+      executionTimeNanos.set(System.nanoTime());
+      executionCount.incrementAndGet();
+      LOG.info(this.toString());
+
+      // NOTE(review): no wait() on this monitor is visible in this
+      // class; presumably external callers may block on the action —
+      // TODO confirm, else this notify is dead code
+      synchronized (this) {
+        this.notify();
+      }
+    }
+
+    @Override
+    public String toString() {
+      return super.toString() + " executed=" + executed.get() + "; count=" +
+          executionCount.get() + ";";
+    }
+
+    public long getExecutionCount() {
+      return executionCount.get();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
new file mode 100644
index 0000000..c1f2886
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+
+import static org.apache.slider.api.ResourceKeys.COMPONENT_PLACEMENT_POLICY;
+
+/**
+ * Class for basis of Anti-affine placement tests; sets up role2
+ * for anti-affinity.
+ */
+public class BaseMockAppStateAATest extends BaseMockAppStateTest
+    implements MockRoles {
+
+  /** Role status for the base AA role. */
+  private RoleStatus aaRole;
+
+  /** Role status for the AA role requiring a node with the gpu label. */
+  private RoleStatus gpuRole;
+
+  /**
+   * Build the mock application with two anti-affine components:
+   * ROLE1 is anti-affinity-required AND constrained to gpu-labelled
+   * nodes; ROLE2 is anti-affinity-required with no label constraint.
+   *
+   * @return the configured mock application
+   */
+  @Override
+  public Application buildApplication() {
+    Application application = factory.newApplication(0, 0, 0)
+        .name(getTestName());
+    application.getComponent(ROLE1).getConfiguration().setProperty(
+        COMPONENT_PLACEMENT_POLICY, Integer.toString(PlacementPolicy
+            .ANTI_AFFINITY_REQUIRED));
+    application.getComponent(ROLE1).getConfiguration().setProperty(
+        ResourceKeys.YARN_LABEL_EXPRESSION, LABEL_GPU);
+    application.getComponent(ROLE2).getConfiguration().setProperty(
+        COMPONENT_PLACEMENT_POLICY, Integer.toString(PlacementPolicy
+            .ANTI_AFFINITY_REQUIRED));
+    return application;
+  }
+
+
+  /**
+   * After the superclass builds the app state, capture the role status
+   * objects for the two AA roles for use by subclasses.
+   */
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    aaRole = lookupRole(ROLE2);
+    gpuRole = lookupRole(ROLE1);
+  }
+
+  /** @return role status of the plain anti-affine role (ROLE2) */
+  protected RoleStatus getAaRole() {
+    return aaRole;
+  }
+
+  /** @return role status of the gpu-labelled anti-affine role (ROLE1) */
+  protected RoleStatus getGpuRole() {
+    return gpuRole;
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
index c6cc2d0..2bb224b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.NodeInformationList;
 import org.apache.slider.api.types.SliderInstanceDescription;
 import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
@@ -61,15 +62,8 @@ import java.util.Map;
  * Stability: evolving
  */
 public interface SliderClientAPI extends Service {
-  /**
-   * Destroy a cluster. There's two race conditions here
-   * #1 the cluster is started between verifying that there are no live
-   * clusters of that name.
-   */
-  int actionDestroy(String clustername, ActionDestroyArgs destroyArgs)
-      throws YarnException, IOException;
 
-  int actionDestroy(String clustername) throws YarnException,
+  void actionDestroy(String clustername) throws YarnException,
       IOException;
 
   /**
@@ -88,18 +82,6 @@ public interface SliderClientAPI extends Service {
     throws SliderException;
 
   /**
-   * Build up the cluster specification/directory
-   *
-   * @param clustername cluster name
-   * @param buildInfo the arguments needed to build the cluster
-   * @throws YarnException Yarn problems
-   * @throws IOException other problems
-   * @throws BadCommandArgumentsException bad arguments.
-   */
-  int actionBuild(String clustername,
-      AbstractClusterBuildingActionArgs buildInfo) throws YarnException, IOException;
-
-  /**
    * Upload keytab to a designated sub-directory of the user home directory
    *
    * @param installKeytabInfo the arguments needed to upload the keytab
@@ -188,24 +170,7 @@ public interface SliderClientAPI extends Service {
    */
   int actionUpgrade(String clustername,
       ActionUpgradeArgs buildInfo)
-      throws YarnException, IOException; 
-
-  /**
-   * Get the report of a this application
-   * @return the app report or null if it could not be found.
-   * @throws IOException
-   * @throws YarnException
-   */
-  ApplicationReport getApplicationReport()
-      throws IOException, YarnException;
-
-  /**
-   * Kill the submitted application via YARN
-   * @throws YarnException
-   * @throws IOException
-   */
-  boolean forceKillApplication(String reason)
-    throws YarnException, IOException;
+      throws YarnException, IOException;
 
   /**
    * Implement the list action: list all nodes
@@ -213,30 +178,8 @@ public interface SliderClientAPI extends Service {
    */
   int actionList(String clustername, ActionListArgs args) throws IOException, YarnException;
 
-  /**
-   * Enumerate slider instances for the current user, and the
-   * most recent app report, where available.
-   * @param listOnlyInState boolean to indicate that the instances should
-   * only include those in a YARN state
-   * <code> minAppState &lt;= currentState &lt;= maxAppState </code>
-   *
-   * @param minAppState minimum application state to include in enumeration.
-   * @param maxAppState maximum application state to include
-   * @return a map of application instance name to description
-   * @throws IOException Any IO problem
-   * @throws YarnException YARN problems
-   */
-  Map<String, SliderInstanceDescription> enumSliderInstances(
-      boolean listOnlyInState,
-      YarnApplicationState minAppState,
-      YarnApplicationState maxAppState)
-      throws IOException, YarnException;
 
-  /**
-   * Implement the islive action: probe for a cluster of the given name existing
-   * @return exit code
-   */
-  int actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
+  void actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException;
 
   /**
    * Test for a cluster existing probe for a cluster of the given name existing
@@ -288,7 +231,7 @@ public interface SliderClientAPI extends Service {
    * @throws YarnException
    * @throws IOException
    */
-  String actionStatus(String clustername) throws YarnException, IOException;
+  Application actionStatus(String clustername) throws YarnException, IOException;
 
   /**
    * Version Details
@@ -303,13 +246,13 @@ public interface SliderClientAPI extends Service {
    * @param freezeArgs arguments to the stop
    * @return EXIT_SUCCESS if the cluster was not running by the end of the operation
    */
-  int actionFreeze(String clustername, ActionFreezeArgs freezeArgs)
+  void actionStop(String clustername, ActionFreezeArgs freezeArgs)
       throws YarnException, IOException;
 
   /**
    * Restore a cluster
    */
-  int actionThaw(String clustername, ActionThawArgs thaw) throws YarnException, IOException;
+  int actionStart(String clustername, ActionThawArgs thaw) throws YarnException, IOException;
 
   /**
    * Registry operation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
index 258ef31..4839395 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
@@ -60,6 +60,10 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+//TODO, Remove this class and YarnAppListClient
+// why do we need so many yarn client wrappers ?
+// - yarn client already provides most of functionality already
+
 /**
  * A class that extends visibility to some of the YarnClientImpl
  * members and data structures, and factors out pure-YARN operations
@@ -69,25 +73,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
   protected static final Logger log = LoggerFactory.getLogger(SliderYarnClientImpl.class);
 
   /**
-   * Keyword to use in the {@link #emergencyForceKill(String)}
-   * operation to force kill <i>all</i> application instances belonging
-   * to a specific user
-   */
-  public static final String KILL_ALL = "all";
-
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    InetSocketAddress clientRpcAddress = SliderUtils.getRmAddress(conf);
-    if (!SliderUtils.isAddressDefined(clientRpcAddress)) {
-      // address isn't known; fail fast
-      throw new BindException("Invalid " + YarnConfiguration.RM_ADDRESS
-          + " value:" + conf.get(YarnConfiguration.RM_ADDRESS)
-          + " - see https://wiki.apache.org/hadoop/UnsetHostnameOrPort");
-    }
-    super.serviceInit(conf);
-  }
-
-  /**
    * Get the RM Client RPC interface
    * @return an RPC interface valid after initialization and authentication
    */
@@ -96,52 +81,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
   }
 
   /**
-   * List Slider <i>running</i>instances belonging to a specific user.
-   * @deprecated use {@link #listDeployedInstances(String)}
-   * @param user user: "" means all users
-   * @return a possibly empty list of Slider AMs
-   */
-  public List<ApplicationReport> listInstances(String user)
-    throws YarnException, IOException {
-    return listDeployedInstances(user);
-  }
-
-  /**
-   * List Slider <i>deployed</i>instances belonging to a specific user.
-   * <p>
-   *   Deployed means: known about in the YARN cluster; it will include
-   *   any that are in the failed/finished state, as well as those queued
-   *   for starting.
-   * @param user user: "" means all users
-   * @return a possibly empty list of Slider AMs
-   */
-  public List<ApplicationReport> listDeployedInstances(String user)
-    throws YarnException, IOException {
-    return listDeployedInstances(user, null);
-  }
-
-  /**
-   * List Slider <i>deployed</i>instances belonging to a specific user in a
-   * given set of states.
-   * <p>
-   * Deployed means: known about in the YARN cluster; it will include all apps
-   * in the specified set of states.
-   *
-   * @param user
-   *          user: "" means all users
-   * @param appStates
-   *          filter by a set of YarnApplicationState
-   * @return a possibly empty list of Slider AMs
-   * @throws YarnException
-   * @throws IOException
-   */
-  public List<ApplicationReport> listDeployedInstances(String user,
-      EnumSet<YarnApplicationState> appStates)
-      throws YarnException, IOException {
-    return listDeployedInstances(user, appStates, null);
-  }
-
-  /**
    * List Slider <i>deployed</i>instances belonging to a specific user in a
    * given set of states and filtered by an application name tag.
    * <p>
@@ -178,21 +117,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
     }
     return results;
   }
-
-  /**
-   * find all instances of a specific app -if there is more than one in the
-   * YARN cluster,
-   * this returns them all
-   * @param user user; use "" for all users
-   * @param appname application name
-   * @return the list of all matching application instances
-   */
-  public List<ApplicationReport> findAllInstances(String user, String appname)
-      throws IOException, YarnException {
-    Preconditions.checkArgument(appname != null, "Null application name");
-
-    return listDeployedInstances(user, null, appname);
-  }
   
   /**
    * Helper method to determine if a cluster application is running -or
@@ -206,122 +130,6 @@ public class SliderYarnClientImpl extends YarnClientImpl {
     return app.getYarnApplicationState().ordinal() <= YarnApplicationState.RUNNING.ordinal();
   }
 
-
-  /**
-   * Kill a running application
-   * @param applicationId app Id
-   * @param reason reason: reason for log
-   * @return the response
-   * @throws YarnException YARN problems
-   * @throws IOException IO problems
-   */
-  public  KillApplicationResponse killRunningApplication(ApplicationId applicationId,
-                                                         String reason)
-      throws YarnException, IOException {
-    Preconditions.checkArgument(applicationId != null, "Null application Id");
-    log.info("Killing application {} - {}", applicationId.getClusterTimestamp(),
-             reason);
-    KillApplicationRequest request =
-      Records.newRecord(KillApplicationRequest.class);
-    request.setApplicationId(applicationId);
-    return getRmClient().forceKillApplication(request);
-  }
-
-  private String getUsername() throws IOException {
-    return UserGroupInformation.getCurrentUser().getShortUserName();
-  }
-  
-  /**
-   * Force kill a yarn application by ID. No niceties here
-   * @param applicationId app Id. "all" means "kill all instances of the current user
-   * 
-   */
-  public void emergencyForceKill(String applicationId)
-      throws YarnException, IOException {
-
-    Preconditions.checkArgument(StringUtils.isNotEmpty(applicationId),
-        "Null/empty application Id");
-
-    if (KILL_ALL.equals(applicationId)) {
-      // user wants all instances killed
-      String user = getUsername();
-      log.info("Killing all applications belonging to {}", user);
-      Collection<ApplicationReport> instances = listDeployedInstances(user,
-          SliderUtils.getAllLiveAppStates());
-      for (ApplicationReport instance : instances) {
-        ApplicationId appId = instance.getApplicationId();
-        log.info("Killing Application {}", appId);
-        killRunningApplication(appId, "forced kill");
-      }
-    } else {
-      ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
-
-      log.info("Killing Application {}", applicationId);
-
-      killRunningApplication(appId, "forced kill");
-    }
-  }
-
-  /**
-   * Monitor the submitted application for reaching the requested state.
-   * Will also report if the app reaches a later state (failed, killed, etc)
-   * Kill application if duration!= null & time expires. 
-   * @param appId Application Id of application to be monitored
-   * @param duration how long to wait -must be more than 0
-   * @param desiredState desired state.
-   * @return the application report -null on a timeout
-   * @throws YarnException
-   * @throws IOException
-   */
-  public ApplicationReport monitorAppToState(
-    ApplicationId appId, YarnApplicationState desiredState, Duration duration)
-    throws YarnException, IOException {
-
-    if (appId == null) {
-      throw new BadCommandArgumentsException("null application ID");
-    }
-    if (duration.limit <= 0) {
-      throw new BadCommandArgumentsException("Invalid monitoring duration");
-    }
-    log.debug("Waiting {} millis for app to reach state {} ",
-              duration.limit,
-              desiredState);
-    duration.start();
-    try {
-      while (true) {
-        // Get application report for the appId we are interested in
-
-        ApplicationReport r = getApplicationReport(appId);
-
-        log.debug("queried status is\n{}",
-          new SliderUtils.OnDemandReportStringifier(r));
-
-        YarnApplicationState state = r.getYarnApplicationState();
-        if (state.ordinal() >= desiredState.ordinal()) {
-          log.debug("App in desired state (or higher) :{}", state);
-          return r;
-        }
-        if (duration.getLimitExceeded()) {
-          log.debug(
-            "Wait limit of {} millis to get to state {}, exceeded; app status\n {}",
-            duration.limit,
-            desiredState,
-            new SliderUtils.OnDemandReportStringifier(r));
-          return null;
-        }
-
-        // sleep 1s.
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException ignored) {
-          log.debug("Thread sleep in monitoring loop interrupted");
-        }
-      }
-    } finally {
-      duration.close();
-    }
-  }
-
   /**
    * find all live instances of a specific app -if there is >1 in the cluster,
    * this returns them all. State should be running or less

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
index a007326..3b5147f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
@@ -112,17 +112,6 @@ public class SliderApplicationIpcClient implements SliderApplicationApi {
     }
   }
 
-
-  @Override
-  public void putDesiredResources(ConfTree updated) throws IOException {
-    try {
-      operations.flex(updated);
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  
   @Override
   public AggregateConf getResolvedModel() throws IOException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
index 702233a..623b8b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
@@ -25,6 +25,8 @@ import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.StateValues;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.ContainerInformation;
@@ -39,6 +41,7 @@ import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.WaitTimeoutException;
 import org.apache.slider.core.persist.ConfTreeSerDeser;
+import org.apache.slider.core.persist.JsonSerDeser;
 import org.codehaus.jackson.JsonParseException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -61,6 +64,8 @@ public class SliderClusterOperations {
     log = LoggerFactory.getLogger(SliderClusterOperations.class);
   
   private final SliderClusterProtocol appMaster;
+  private static final JsonSerDeser<Application> jsonSerDeser =
+      new JsonSerDeser<Application>(Application.class);
   private static final Messages.EmptyPayloadProto EMPTY;
   static {
     EMPTY = Messages.EmptyPayloadProto.newBuilder().build(); 
@@ -130,49 +135,21 @@ public class SliderClusterOperations {
    * Connect to a live cluster and get its current state
    * @return its description
    */
-  public ClusterDescription getClusterDescription()
-    throws YarnException, IOException {
-    
+  public Application getApplication() throws YarnException, IOException {
     Messages.GetJSONClusterStatusRequestProto req =
       Messages.GetJSONClusterStatusRequestProto.newBuilder().build();
     Messages.GetJSONClusterStatusResponseProto resp =
       appMaster.getJSONClusterStatus(req);
     String statusJson = resp.getClusterSpec();
     try {
-      return ClusterDescription.fromJson(statusJson);
+      return jsonSerDeser.fromJson(statusJson);
     } catch (JsonParseException e) {
-      log.error("Exception " + e + " parsing:\n" + statusJson, e);
+      log.error("Error when parsing app json file", e);
       throw e;
     }
   }
 
   /**
-   * Get the AM instance definition.
-   * <p>
-   *   See {@link SliderClusterProtocol#getInstanceDefinition(Messages.GetInstanceDefinitionRequestProto)}
-   * @return current slider AM aggregate definition
-   * @throws YarnException
-   * @throws IOException
-   */
-  public AggregateConf getInstanceDefinition()
-    throws YarnException, IOException {
-    Messages.GetInstanceDefinitionRequestProto.Builder builder =
-      Messages.GetInstanceDefinitionRequestProto.newBuilder();
-
-    Messages.GetInstanceDefinitionRequestProto request = builder.build();
-    Messages.GetInstanceDefinitionResponseProto response =
-      appMaster.getInstanceDefinition(request);
-
-    ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser();
-
-    ConfTree internal = confTreeSerDeser.fromJson(response.getInternal());
-    ConfTree resources = confTreeSerDeser.fromJson(response.getResources());
-    ConfTree app = confTreeSerDeser.fromJson(response.getApplication());
-    AggregateConf instanceDefinition =
-      new AggregateConf(resources, app, internal);
-    return instanceDefinition;
-  }
-  /**
    * Kill a container
    * @param id container ID
    * @return a success flag
@@ -315,22 +292,14 @@ public class SliderClusterOperations {
     return state;
   }
 
-  /**
-   * Flex operation
-   * @param resources new resources
-   * @return the response
-   * @throws IOException
-   */
-  public boolean flex(ConfTree resources) throws IOException {
-    Messages.FlexClusterRequestProto request =
-      Messages.FlexClusterRequestProto.newBuilder()
-              .setClusterSpec(resources.toJson())
-              .build();
-    Messages.FlexClusterResponseProto response = appMaster.flexCluster(request);
-    return response.getResponse();
+  public void flex(Component component) throws IOException{
+    Messages.FlexComponentRequestProto request =
+        Messages.FlexComponentRequestProto.newBuilder()
+            .setNumberOfContainers(component.getNumberOfContainers().intValue())
+            .setName(component.getName()).build();
+        appMaster.flexComponent(request);
   }
 
-
   /**
    * Commit (possibly delayed) AM suicide
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java
index 573ef64..4c376e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/rest/SliderApplicationApiRestClient.java
@@ -177,29 +177,6 @@ public class SliderApplicationApiRestClient extends BaseRestClient
   }
 
   @Override
-  public void putDesiredResources(ConfTree updated) throws IOException {
-    WebResource resource = applicationResource(MODEL_DESIRED_RESOURCES);
-    try {
-
-      // put operation. The result is discarded; it does help validate
-      // that the operation returned a JSON data structure as well as a 200
-      // response.
-
-      resource.accept(MediaType.APPLICATION_JSON_TYPE)
-              .type(MediaType.APPLICATION_JSON_TYPE)
-              .entity(updated)
-              .put(ConfTree.class);
-    } catch (ClientHandlerException ex) {
-        throw ExceptionConverter.convertJerseyException("PUT",
-            resource.getURI().toString(),
-            ex);
-      } catch (UniformInterfaceException ex) {
-      throw ExceptionConverter.convertJerseyException("PUT",
-          resource.getURI().toString(), ex);
-      }
-  }
-
-  @Override
   public AggregateConf getResolvedModel() throws IOException {
     return getApplicationResource(MODEL_RESOLVED, AggregateConf.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
index 4bf1b5b..9a4fa6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -18,6 +18,8 @@
 
 package org.apache.slider.common;
 
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -50,7 +52,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String SLIDER_DEPENDENCY_LOCALIZED_DIR_LINK = "slider_dep";
   String SLIDER_DEPENDENCY_HDP_PARENT_DIR = "/hdp";
   String SLIDER_DEPENDENCY_DIR = "/apps/%s/slider";
-  String SLIDER_DEPENDENCY_TAR_GZ_FILE_NAME = "slider";
+  String SLIDER_DEPENDENCY_TAR_GZ_FILE_NAME = "slider-dep";
   String SLIDER_DEPENDENCY_TAR_GZ_FILE_EXT = ".tar.gz";
   String SLIDER_DEPENDENCY_DIR_PERMISSIONS = "755";
 
@@ -181,7 +183,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
   /**
    * name of generated dir for this conf: {@value}
    */
-  String SUBMITTED_CONF_DIR = "confdir";
+  String SUBMITTED_CONF_DIR = "conf";
 
   /**
    * Slider AM log4j file name : {@value}
@@ -227,7 +229,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
    */
   String ADDONS_DIR = "addons";
 
-  String SLIDER_JAR = "slider.jar";
+  String SLIDER_JAR = "slider-core.jar";
   String JCOMMANDER_JAR = "jcommander.jar";
   String GSON_JAR = "gson.jar";
   String DEFAULT_APP_PKG = "appPkg.zip";
@@ -238,7 +240,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String STDERR_AM = "slider-err.txt";
   String DEFAULT_GC_OPTS = "";
 
-  String HADOOP_USER_NAME = "HADOOP_USER_NAME";
+  String HADOOP_USER_NAME = ApplicationConstants.Environment.USER.toString();
   String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
   String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
index 72dd44f..b666834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderXmlConfKeys.java
@@ -73,6 +73,9 @@ public interface SliderXmlConfKeys {
   int DEFAULT_YARN_QUEUE_PRIORITY = 1;
 
 
+  String KEY_AM_RESOURCE_MEM = "slider.am.resource.memory";
+  long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
+
   /**
    * The slider base path: {@value}
    * Defaults to HomeDir/.slider

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractActionArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractActionArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractActionArgs.java
index 63ccff8..6dd61fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractActionArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractActionArgs.java
@@ -69,7 +69,7 @@ public abstract class AbstractActionArgs extends ArgOps implements Arguments {
   /**
    -D name=value
 
-   Define an HBase configuration option which overrides any options in
+   Define a configuration option which overrides any options in
    the configuration XML files of the image or in the image configuration
    directory. The values will be persisted.
    Configuration options are only passed to the cluster when creating or reconfiguring a cluster.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
index 1d28c78..2d471ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AbstractClusterBuildingActionArgs.java
@@ -35,186 +35,13 @@ import java.util.Map;
  * Abstract Action to build things; shares args across build and
  * list
  */
-public abstract class AbstractClusterBuildingActionArgs extends
-    AbstractActionArgs {
-
-  /**
-   * Declare the image configuration directory to use when creating or
-   * reconfiguring a slider cluster. The path must be on a filesystem visible
-   * to all nodes in the YARN cluster. Only one configuration directory can
-   * be specified.
-   */
-  @Parameter(names = ARG_CONFDIR,
-      description = "Path to cluster configuration directory in HDFS",
-      converter = PathArgumentConverter.class)
-  public Path confdir;
-
-  @Parameter(names = ARG_ZKPATH,
-      description = "Zookeeper path for the application")
-  public String appZKPath;
-
-  @Parameter(names = ARG_ZKHOSTS,
-      description = "comma separated list of the Zookeeper hosts")
-  public String zkhosts;
-
-  /**
-   * --image path
-   * the full path to a .tar or .tar.gz path containing an HBase image.
-   */
-  @Parameter(names = ARG_IMAGE,
-      description = "The full path to a .tar or .tar.gz path containing the application",
-      converter = PathArgumentConverter.class)
-  public Path image;
-
-  @Parameter(names = ARG_APP_HOME,
-      description = "Home directory of a pre-installed application")
-  public String appHomeDir;
-
-  @Parameter(names = ARG_PROVIDER,
-      description = "Provider of the specific cluster application")
-  public String provider = SliderProviderFactory.DEFAULT_CLUSTER_TYPE;
-
-  @Parameter(names = {ARG_PACKAGE},
-      description = "URI to a slider package")
-  public String packageURI;
-
-  @Parameter(names = {ARG_RESOURCES},
-      description = "File defining the resources of this instance")
-  public File resources;
-
-  @Parameter(names = {ARG_TEMPLATE},
-      description = "Template application configuration")
-  public File template;
-
-  @Parameter(names = {ARG_METAINFO},
-      description = "Application meta info file")
-  public File appMetaInfo;
-
-  @Parameter(names = {ARG_METAINFO_JSON},
-      description = "Application meta info JSON blob")
-  public String appMetaInfoJson;
-
-  @Parameter(names = {ARG_APPDEF},
-      description = "Application def (folder or a zip package)")
-  public File appDef;
-
-  @Parameter(names = {ARG_QUEUE},
-             description = "Queue to submit the application")
+public abstract class AbstractClusterBuildingActionArgs
+    extends AbstractActionArgs {
+  @Parameter(names = {
+      ARG_QUEUE }, description = "Queue to submit the application")
   public String queue;
 
-  @Parameter(names = {ARG_LIFETIME},
-      description = "Lifetime of the application from the time of request")
+  @Parameter(names = {
+      ARG_LIFETIME }, description = "Lifetime of the application from the time of request")
   public long lifetime;
-
-  @ParametersDelegate
-  public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
-
-  @ParametersDelegate
-  public AddonArgsDelegate addonDelegate = new AddonArgsDelegate();
-
-
-  @ParametersDelegate
-  public AppAndResouceOptionArgsDelegate optionsDelegate =
-      new AppAndResouceOptionArgsDelegate();
-
-
-  public Map<String, String> getOptionsMap() throws
-      BadCommandArgumentsException {
-    return optionsDelegate.getOptionsMap();
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getCompOptionMap() throws
-      BadCommandArgumentsException {
-    return optionsDelegate.getCompOptionMap();
-  }
-
-
-  public Map<String, String> getResourceOptionsMap() throws
-      BadCommandArgumentsException {
-    return optionsDelegate.getResourceOptionsMap();
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getResourceCompOptionMap() throws
-      BadCommandArgumentsException {
-    return optionsDelegate.getResourceCompOptionMap();
-  }
-
-  @VisibleForTesting
-  public List<String> getComponentTuples() {
-    return componentDelegate.getComponentTuples();
-  }
-
-  /**
-   * Get the role mapping (may be empty, but never null)
-   * @return role mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, String> getComponentMap() throws
-      BadCommandArgumentsException {
-    return componentDelegate.getComponentMap();
-  }
-
-  @VisibleForTesting
-  public List<String> getAddonTuples() {
-    return addonDelegate.getAddonTuples();
-  }
-
-  /**
-   * Get the list of addons (may be empty, but never null)
-   */
-  public Map<String, String> getAddonMap() throws
-      BadCommandArgumentsException {
-    return addonDelegate.getAddonMap();
-  }
-
-  public Path getConfdir() {
-    return confdir;
-  }
-
-  public String getAppZKPath() {
-    return appZKPath;
-  }
-
-  public String getZKhosts() {
-    return zkhosts;
-  }
-
-  public Path getImage() {
-    return image;
-  }
-
-  public String getAppHomeDir() {
-    return appHomeDir;
-  }
-
-  public String getProvider() {
-    return provider;
-  }
-
-  public ConfTree buildAppOptionsConfTree() throws
-      BadCommandArgumentsException {
-    return buildConfTree(getOptionsMap());
-  }
-
-  public ConfTree buildResourceOptionsConfTree() throws
-      BadCommandArgumentsException {
-    return buildConfTree(getResourceOptionsMap());
-  }
-
-  protected ConfTree buildConfTree(Map<String, String> optionsMap) throws
-      BadCommandArgumentsException {
-    ConfTree confTree = new ConfTree();
-    confTree.global.putAll(optionsMap);
-    return confTree;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
deleted file mode 100644
index 1a182d1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionBuildArgs.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameters;
-
-@Parameters(commandNames = {SliderActions.ACTION_BUILD},
-            commandDescription = SliderActions.DESCRIBE_ACTION_BUILD)
-
-public class ActionBuildArgs extends AbstractClusterBuildingActionArgs {
-
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_BUILD;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
index e70f30a..c8cac65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionCreateArgs.java
@@ -18,6 +18,7 @@
 
 package org.apache.slider.common.params;
 
+import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
 import com.beust.jcommander.ParametersDelegate;
 
@@ -26,34 +27,19 @@ import java.io.File;
 @Parameters(commandNames = {SliderActions.ACTION_CREATE},
             commandDescription = SliderActions.DESCRIBE_ACTION_CREATE)
 
-public class ActionCreateArgs extends AbstractClusterBuildingActionArgs
-  implements WaitTimeAccessor, LaunchArgsAccessor {
-  
-  @Override
-  public String getActionName() {
-    return SliderActions.ACTION_CREATE;
-  }
-  
-  @ParametersDelegate
-  LaunchArgsDelegate launchArgs = new LaunchArgsDelegate();
+public class ActionCreateArgs extends AbstractClusterBuildingActionArgs {
 
-  @Override
-  public File getOutputFile() {
-    return launchArgs.getOutputFile();
-  }
+  @Parameter(names = {ARG_APPDEF},
+      description = "Template application definition file in JSON format.")
+  public File appDef;
 
-  @Override
-  public String getRmAddress() {
-    return launchArgs.getRmAddress();
+  public File getAppDef() {
+    return appDef;
   }
 
   @Override
-  public int getWaittime() {
-    return launchArgs.getWaittime();
-  }
-
-  @Override
-  public void setWaittime(int waittime) {
-    launchArgs.setWaittime(waittime);
+  public String getActionName() {
+    return SliderActions.ACTION_CREATE;
   }
 }
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
index 725973e..c565484 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFlexArgs.java
@@ -18,37 +18,31 @@
 
 package org.apache.slider.common.params;
 
+import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
-import com.beust.jcommander.ParametersDelegate;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-
-import java.util.List;
-import java.util.Map;
 
 @Parameters(commandNames = {SliderActions.ACTION_FLEX},
             commandDescription = SliderActions.DESCRIBE_ACTION_FLEX)
 
 public class ActionFlexArgs extends AbstractActionArgs {
 
+  @Parameter(names = {ARG_COMPONENT},
+      description = "component name")
+  String componentName;
+
+  @Parameter(names = {ARG_COUNT},
+      description = "number of containers")
+  long numberOfContainers;
+
   @Override
   public String getActionName() {
     return SliderActions.ACTION_FLEX;
   }
-  
-  @ParametersDelegate
-  public ComponentArgsDelegate componentDelegate = new ComponentArgsDelegate();
-
-  /**
-   * Get the component mapping (may be empty, but never null)
-   * @return mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, String> getComponentMap() throws BadCommandArgumentsException {
-    return componentDelegate.getComponentMap();
-  }
 
-  public List<String> getComponentTuples() {
-    return componentDelegate.getComponentTuples();
+  public String getComponent() {
+    return componentName;
+  }
+  public long getNumberOfContainers() {
+    return numberOfContainers;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFreezeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFreezeArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFreezeArgs.java
index e3085d9..f3cc6ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFreezeArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionFreezeArgs.java
@@ -22,14 +22,14 @@ import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
 import com.beust.jcommander.ParametersDelegate;
 
-@Parameters(commandNames = {SliderActions.ACTION_FREEZE},
+@Parameters(commandNames = {SliderActions.ACTION_STOP },
             commandDescription = SliderActions.DESCRIBE_ACTION_FREEZE)
 
 public class ActionFreezeArgs extends AbstractActionArgs implements
                                                          WaitTimeAccessor {
   @Override
   public String getActionName() {
-    return SliderActions.ACTION_FREEZE;
+    return SliderActions.ACTION_STOP;
   }
   
   public static final String FREEZE_COMMAND_ISSUED = "stop command issued";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionThawArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionThawArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionThawArgs.java
index 2bd856f..04988c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionThawArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionThawArgs.java
@@ -24,7 +24,7 @@ import com.beust.jcommander.ParametersDelegate;
 
 import java.io.File;
 
-@Parameters(commandNames = {SliderActions.ACTION_THAW},
+@Parameters(commandNames = {SliderActions.ACTION_START },
             commandDescription = SliderActions.DESCRIBE_ACTION_THAW)
 public class ActionThawArgs extends AbstractActionArgs implements
                                                        WaitTimeAccessor,
@@ -33,7 +33,7 @@ public class ActionThawArgs extends AbstractActionArgs implements
 
   @Override
   public String getActionName() {
-    return SliderActions.ACTION_THAW;
+    return SliderActions.ACTION_START;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
index 6ef51b2..18aa1f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
@@ -18,56 +18,31 @@
 
 package org.apache.slider.common.params;
 
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
-import com.beust.jcommander.ParametersDelegate;
+
+import java.util.ArrayList;
+import java.util.List;
 
 @Parameters(commandNames = { SliderActions.ACTION_UPGRADE },
             commandDescription = SliderActions.DESCRIBE_ACTION_UPGRADE)
-public class ActionUpgradeArgs extends AbstractClusterBuildingActionArgs
-    implements WaitTimeAccessor, LaunchArgsAccessor {
+public class ActionUpgradeArgs extends AbstractActionArgs {
 
   @Override
   public String getActionName() {
     return SliderActions.ACTION_UPGRADE;
   }
-
-  @ParametersDelegate
-  LaunchArgsDelegate launchArgs = new LaunchArgsDelegate();
-
-  @Override
-  public File getOutputFile() {
-    return launchArgs.getOutputFile();
-  }
-
-  @Override
-  public String getRmAddress() {
-    return launchArgs.getRmAddress();
-  }
-
-  @Override
-  public int getWaittime() {
-    return launchArgs.getWaittime();
-  }
-
-  @Override
-  public void setWaittime(int waittime) {
-    launchArgs.setWaittime(waittime);
-  }
-
-  @Parameter(names={ARG_CONTAINERS}, variableArity = true,
-             description = "stop specific containers")
-  public List<String> containers = new ArrayList<>(0);
-
-  @Parameter(names={ARG_COMPONENTS}, variableArity = true,
-      description = "stop all containers of specific components")
-  public List<String> components = new ArrayList<>(0);
-
-  @Parameter(names = {ARG_FORCE},
-      description = "force spec upgrade operation")
-  public boolean force;
+  
+//  TODO upgrade container
+//  @Parameter(names={ARG_CONTAINERS}, variableArity = true,
+//             description = "stop specific containers")
+//  public List<String> containers = new ArrayList<>(0);
+//
+//  @Parameter(names={ARG_COMPONENTS}, variableArity = true,
+//      description = "stop all containers of specific components")
+//  public List<String> components = new ArrayList<>(0);
+//
+//  @Parameter(names = {ARG_FORCE},
+//      description = "force spec upgrade operation")
+//  public boolean force;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
index cbf7e59..45c1fbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
@@ -36,6 +36,7 @@ public interface Arguments {
   String ARG_CLIENT = "--client";
   String ARG_CONFDIR = "--appconf";
   String ARG_COMPONENT = "--component";
+  String ARG_COUNT = "--count";
   String ARG_COMPONENT_SHORT = "--comp";
   String ARG_COMPONENTS = "--components";
   String ARG_COMP_OPT= "--compopt";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
index 4016cc9..abd2ce7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
@@ -53,7 +53,6 @@ public class ClientArgs extends CommonArgs {
   // =========================================================
 
   private final ActionAMSuicideArgs actionAMSuicideArgs = new ActionAMSuicideArgs();
-  private final ActionBuildArgs actionBuildArgs = new ActionBuildArgs();
   private final ActionClientArgs actionClientArgs = new ActionClientArgs();
   private final ActionCreateArgs actionCreateArgs = new ActionCreateArgs();
   private final ActionDependencyArgs actionDependencyArgs = new ActionDependencyArgs();
@@ -96,7 +95,6 @@ public class ClientArgs extends CommonArgs {
 
     addActions(
         actionAMSuicideArgs,
-        actionBuildArgs,
         actionClientArgs,
         actionCreateArgs,
         actionDependencyArgs,
@@ -155,10 +153,6 @@ public class ClientArgs extends CommonArgs {
     return actionAMSuicideArgs;
   }
 
-  public ActionBuildArgs getActionBuildArgs() {
-    return actionBuildArgs;
-  }
-
   public ActionInstallPackageArgs getActionInstallPackageArgs() { return actionInstallPackageArgs; }
 
   public ActionClientArgs getActionClientArgs() { return actionClientArgs; }
@@ -256,23 +250,17 @@ public class ClientArgs extends CommonArgs {
       action = ACTION_HELP;
     }
     switch (action) {
-      case ACTION_BUILD:
-        bindCoreAction(actionBuildArgs);
-        //its a builder, so set those actions too
-        buildingActionArgs = actionBuildArgs;
-        break;
-
       case ACTION_CREATE:
         bindCoreAction(actionCreateArgs);
         //its a builder, so set those actions too
         buildingActionArgs = actionCreateArgs;
         break;
 
-      case ACTION_FREEZE:
+      case ACTION_STOP:
         bindCoreAction(actionFreezeArgs);
         break;
 
-      case ACTION_THAW:
+      case ACTION_START:
         bindCoreAction(actionThawArgs);
         break;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderAMArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderAMArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderAMArgs.java
index f9516d1..de65954 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderAMArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderAMArgs.java
@@ -43,7 +43,7 @@ public class SliderAMArgs extends CommonArgs {
    * This is the URI in the FS to the Slider cluster; the conf file (and any
    * other cluster-specifics) can be picked up here
    */
-  public String getSliderClusterURI() {
+  public String getAppDefDir() {
     return createAction.sliderClusterURI;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
index 204ad9a..82e5903 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
@@ -36,7 +36,7 @@ public interface SliderActions {
   String ACTION_ECHO = "echo";
   String ACTION_EXISTS = "exists";
   String ACTION_FLEX = "flex";
-  String ACTION_FREEZE = "stop";
+  String ACTION_STOP = "stop";
   String ACTION_HELP = "help";
   String ACTION_INSTALL_KEYTAB = "install-keytab";
   String ACTION_INSTALL_PACKAGE = "install-package";
@@ -53,7 +53,7 @@ public interface SliderActions {
   String ACTION_RESOLVE = "resolve";
   String ACTION_RESOURCE = "resource";
   String ACTION_STATUS = "status";
-  String ACTION_THAW = "start";
+  String ACTION_START = "start";
   String ACTION_TOKENS = "tokens";
 
   String ACTION_VERSION = "version";
@@ -68,7 +68,7 @@ public interface SliderActions {
   String DESCRIBE_ACTION_UPDATE =
       "Update template for a Slider application";
   String DESCRIBE_ACTION_UPGRADE =
-      "Rolling upgrade/downgrade the application to a newer/previous version";
+      "Rolling upgrade/downgrade the component/container to a newer/previous version";
   String DESCRIBE_ACTION_DESTROY =
         "Destroy a stopped Slider application";
   String DESCRIBE_ACTION_EXISTS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
index c3d6d98..5919312 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
@@ -511,6 +511,7 @@ public class CoreFileSystem {
    * @return the parent dir path of slider.tar.gz in HDFS
    */
   public Path getDependencyPath() {
+    // FIXME: 3/20/17 HDP ???????????
     String parentDir = (SliderUtils.isHdp()) ? SliderKeys.SLIDER_DEPENDENCY_HDP_PARENT_DIR
         + SliderKeys.SLIDER_DEPENDENCY_DIR
         : SliderKeys.SLIDER_DEPENDENCY_DIR;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderFileSystem.java
index 294f37e..40b07bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderFileSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderFileSystem.java
@@ -20,6 +20,7 @@ package org.apache.slider.common.tools;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 
 import java.io.IOException;
 
@@ -29,6 +30,8 @@ import java.io.IOException;
  */
 public class SliderFileSystem extends CoreFileSystem {
 
+  Path appDir = null;
+
   public SliderFileSystem(FileSystem fileSystem,
       Configuration configuration) {
     super(fileSystem, configuration);
@@ -38,5 +41,11 @@ public class SliderFileSystem extends CoreFileSystem {
     super(configuration);
   }
 
+  public void setAppDir(Path appDir) {
+    this.appDir = appDir;
+  }
 
+  public Path getAppDir() {
+    return this.appDir;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 20c7831..c0ef2d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -499,9 +499,6 @@ public final class SliderUtils {
           "Source file not a file " + srcFile);
     }
     FileSystem destFS = FileSystem.get(destFile.toUri(), conf);
-    if (destFS.exists(destFile)) {
-      throw new IOException("Dest file already exists " + destFile);
-    }
     FileUtil.copy(srcFS, srcFile, destFS, destFile, false, true, conf);
   }
 
@@ -1221,6 +1218,29 @@ public final class SliderUtils {
     return buildEnvMap(roleOpts, null);
   }
 
+
+  // Build env map: key -> value;
+  // value will be replaced by the corresponding value in tokenMap, if any.
+  public static Map<String, String> buildEnvMap(
+      org.apache.slider.api.resource.Configuration conf,
+      Map<String,String> tokenMap) {
+    if (tokenMap == null) {
+      return conf.getEnv();
+    }
+    Map<String, String> env = new HashMap<>();
+    for (Map.Entry<String, String> entry : conf.getEnv().entrySet()) {
+      String key = entry.getKey();
+      String val = entry.getValue();
+      for (Map.Entry<String,String> token : tokenMap.entrySet()) {
+        val = val.replaceAll(Pattern.quote(token.getKey()),
+            token.getValue());
+      }
+      env.put(key,val);
+    }
+    return env;
+  }
+
+
   public static Map<String, String> buildEnvMap(Map<String, String> roleOpts,
       Map<String,String> tokenMap) {
     Map<String, String> env = new HashMap<>();
@@ -1273,8 +1293,8 @@ public final class SliderUtils {
    * @param clustername cluster name
    * @throws BadCommandArgumentsException if it is invalid
    */
-  public static void validateClusterName(String clustername) throws
-      BadCommandArgumentsException {
+  public static void validateClusterName(String clustername)
+      throws BadCommandArgumentsException {
     if (!isClusternameValid(clustername)) {
       throw new BadCommandArgumentsException(
           "Illegal cluster name: " + clustername);
@@ -1603,14 +1623,12 @@ public final class SliderUtils {
    * @param sliderConfDir relative path to the dir containing slider config
    *                      options to put on the classpath -or null
    * @param libdir directory containing the JAR files
-   * @param config the configuration
    * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use
    * (and hence the current classpath should be used, not anything built up)
    * @return a classpath
    */
   public static ClasspathConstructor buildClasspath(String sliderConfDir,
       String libdir,
-      Configuration config,
       SliderFileSystem sliderFileSystem,
       boolean usingMiniMRCluster) {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AppMasterLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AppMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AppMasterLauncher.java
deleted file mode 100644
index 7190c3a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AppMasterLauncher.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.launch;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.client.api.YarnClientApplication;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.Records;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.common.tools.CoreFileSystem;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
-public class AppMasterLauncher extends AbstractLauncher {
-
-
-  private static final Logger log =
-    LoggerFactory.getLogger(AppMasterLauncher.class);
-
-  public final YarnClientApplication application;
-  public final String name;
-  public final String type;
-  public final ApplicationSubmissionContext submissionContext;
-  public final ApplicationId appId;
-  public final boolean secureCluster;
-  private int maxAppAttempts = 0;
-  private boolean keepContainersOverRestarts = true;
-  private String queue = YarnConfiguration.DEFAULT_QUEUE_NAME;
-  private int priority = 1;
-  private final Resource resource = Records.newRecord(Resource.class);
-  private final SliderYarnClientImpl yarnClient;
-  private Long submitTime;
-
-  /**
-   * Build the AM Launcher
-   * @param name app name
-   * @param type application type
-   * @param conf hadoop config
-   * @param fs filesystem binding
-   * @param yarnClient yarn client
-   * @param secureCluster flag to indicate secure cluster
-   * @param options map of options. All values are extracted in this constructor only
-   * @param resourceGlobalOptions global options
-   * @param applicationTags any app tags
-   * @param credentials initial set of credentials
-   * @throws IOException
-   * @throws YarnException
-   */
-  public AppMasterLauncher(String name,
-      String type,
-      Configuration conf,
-      CoreFileSystem fs,
-      SliderYarnClientImpl yarnClient,
-      boolean secureCluster,
-      Map<String, String> options,
-      Map<String, String> resourceGlobalOptions,
-      Set<String> applicationTags,
-      Credentials credentials) throws IOException, YarnException {
-    super(conf, fs, credentials);
-    this.yarnClient = yarnClient;
-    this.application = yarnClient.createApplication();
-    this.name = name;
-    this.type = type;
-    this.secureCluster = secureCluster;
-
-    submissionContext = application.getApplicationSubmissionContext();
-    appId = submissionContext.getApplicationId();
-    // set the application name;
-    submissionContext.setApplicationName(name);
-    // app type used in service enum;
-    submissionContext.setApplicationType(type);
-    if (!applicationTags.isEmpty()) {
-      submissionContext.setApplicationTags(applicationTags);
-    }
-    submissionContext.setNodeLabelExpression(extractLabelExpression(options));
-
-    extractAmRetryCount(submissionContext, resourceGlobalOptions);
-    extractResourceRequirements(resource, options);
-    extractLogAggregationContext(resourceGlobalOptions);
-  }
-
-  public void setMaxAppAttempts(int maxAppAttempts) {
-    this.maxAppAttempts = maxAppAttempts;
-  }
-
-  public void setKeepContainersOverRestarts(boolean keepContainersOverRestarts) {
-    this.keepContainersOverRestarts = keepContainersOverRestarts;
-  }
-
-
-  public Resource getResource() {
-    return resource;
-  }
-
-  public void setMemory(int memory) {
-    resource.setMemory(memory);
-  }
-
-  public void setVirtualCores(int cores) {
-    resource.setVirtualCores(cores);
-  }
-
-  public ApplicationId getApplicationId() {
-    return appId;
-  }
-
-  public int getMaxAppAttempts() {
-    return maxAppAttempts;
-  }
-
-  public boolean isKeepContainersOverRestarts() {
-    return keepContainersOverRestarts;
-  }
-
-  public String getQueue() {
-    return queue;
-  }
-
-  public int getPriority() {
-    return priority;
-  }
-
-  public void setQueue(String queue) {
-    this.queue = queue;
-  }
-
-  public void setPriority(int priority) {
-    this.priority = priority;
-  }
-
-  /**
-   * Complete the launch context (copy in env vars, etc).
-   * @return the container to launch
-   */
-  public ApplicationSubmissionContext completeAppMasterLaunch()
-      throws IOException {
-
-    //queue priority
-    Priority pri = Records.newRecord(Priority.class);
-    pri.setPriority(priority);
-    submissionContext.setPriority(pri);
-
-    // Set the queue to which this application is to be submitted in the RM
-    // Queue for App master
-
-    submissionContext.setQueue(queue);
-
-
-    //container requirements
-    submissionContext.setResource(resource);
-    submissionContext.setLogAggregationContext(logAggregationContext);
-
-    if (keepContainersOverRestarts) {
-      log.debug("Requesting cluster stays running over AM failure");
-      submissionContext.setKeepContainersAcrossApplicationAttempts(true);
-    }
-
-    if (maxAppAttempts > 0) {
-      log.debug("Setting max AM attempts to {}", maxAppAttempts);
-      submissionContext.setMaxAppAttempts(maxAppAttempts);
-    }
-
-    if (secureCluster) {
-      //tokens
-      log.debug("Credentials: {}",
-          CredentialUtils.dumpTokens(getCredentials(), "\n"));
-
-    } else {
-      propagateUsernameInInsecureCluster();
-    }
-    completeContainerLaunch();
-    submissionContext.setAMContainerSpec(containerLaunchContext);
-    return submissionContext;
-  }
-
-  /**
-   * Submit the application. 
-   * @return a launched application representing the submitted application
-   * @throws IOException
-   * @throws YarnException
-   */
-  public LaunchedApplication submitApplication() throws IOException, YarnException {
-    completeAppMasterLaunch();
-    log.info("Submitting application to Resource Manager");
-    ApplicationId applicationId =
-      yarnClient.submitApplication(submissionContext);
-    // implicit success; record the time
-    submitTime = System.currentTimeMillis();
-    return new LaunchedApplication(applicationId, yarnClient);
-  }
-
-  /**
-   * Build a serializable application report. This is a very minimal
-   * report that contains the application Id, name and type —the information
-   * available
-   * @return a data structure which can be persisted
-   */
-  public SerializedApplicationReport createSerializedApplicationReport() {
-    SerializedApplicationReport sar = new SerializedApplicationReport();
-    sar.applicationId = appId.toString();
-    sar.name = name;
-    sar.applicationType = type;
-    sar.queue = queue;
-    sar.submitTime = submitTime;
-    return sar;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/LaunchedApplication.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/LaunchedApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/LaunchedApplication.java
deleted file mode 100644
index 632e3fd..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/LaunchedApplication.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.launch;
-
-import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.common.tools.Duration;
-
-import java.io.IOException;
-
-/**
- * Launched App with logic around it.
- */
-public class LaunchedApplication {
-
-  protected final ApplicationId applicationId;
-  protected final SliderYarnClientImpl yarnClient;
-
-  public LaunchedApplication(ApplicationId applicationId,
-                             SliderYarnClientImpl yarnClient) {
-    assert applicationId != null;
-    assert yarnClient != null;
-    this.applicationId = applicationId;
-    this.yarnClient = yarnClient;
-  }
-
-  public LaunchedApplication(SliderYarnClientImpl yarnClient,
-                             ApplicationReport report) {
-    this.yarnClient = yarnClient;
-    this.applicationId = report.getApplicationId();
-  }
-
-  public ApplicationId getApplicationId() {
-    return applicationId;
-  }
-
-  /**
-   * Monitor the submitted application for reaching the requested state.
-   * Will also report if the app reaches a later state (failed, killed, etc)
-   * Kill application if duration!= null & time expires. 
-   * @param duration how long to wait -must be more than 0
-   * @param desiredState desired state.
-   * @return the application report -null on a timeout
-   * @throws YarnException
-   * @throws IOException
-   */
-  public ApplicationReport monitorAppToState(YarnApplicationState desiredState, Duration duration)
-    throws YarnException, IOException {
-    return yarnClient.monitorAppToState(applicationId, desiredState, duration);
-  }
-
-  /**
-   * Kill the submitted application by sending a call to the ASM
-   * @throws YarnException
-   * @throws IOException
-   */
-  public boolean forceKill(String reason)
-    throws YarnException, IOException {
-    if (applicationId != null) {
-      yarnClient.killRunningApplication(applicationId, reason);
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Kill the application
-   * @return the response
-   * @throws YarnException YARN problems
-   * @throws IOException IO problems
-   */
-  public KillApplicationResponse kill(String reason) throws
-                                                     YarnException,
-                                                     IOException {
-    return yarnClient.killRunningApplication(applicationId, reason);
-  }
-
-  /**
-   * Get the application report of this application
-   * @return an application report
-   * @throws YarnException
-   * @throws IOException
-   */
-  public ApplicationReport getApplicationReport()
-    throws YarnException, IOException {
-    return yarnClient.getApplicationReport(applicationId);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/RunningApplication.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/RunningApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/RunningApplication.java
deleted file mode 100644
index 14c522c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/RunningApplication.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.launch;
-
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.slider.api.SliderClusterProtocol;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.common.SliderExitCodes;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.server.appmaster.rpc.RpcBinder;
-
-import java.io.IOException;
-
-import static org.apache.slider.common.Constants.CONNECT_TIMEOUT;
-import static org.apache.slider.common.Constants.RPC_TIMEOUT;
-
-/**
- * A running application built from an app report. This one
- * can be talked to
- */
-public class RunningApplication extends LaunchedApplication {
-
-  private final ApplicationReport applicationReport;
-  public RunningApplication(SliderYarnClientImpl yarnClient,
-                            ApplicationReport applicationReport) {
-    super(yarnClient, applicationReport);
-    this.applicationReport = applicationReport;
-  }
-
-  public ApplicationReport getApplicationReport() {
-    return applicationReport;
-  }
-
-
-  /**
-   * Connect to a Slider AM
-   * @param app application report providing the details on the application
-   * @return an instance
-   * @throws YarnException
-   * @throws IOException
-   */
-  public SliderClusterProtocol connect(ApplicationReport app) throws
-                                                             YarnException,
-                                                             IOException {
-
-    try {
-      return RpcBinder.getProxy(yarnClient.getConfig(),
-                                yarnClient.getRmClient(),
-                                app,
-                                CONNECT_TIMEOUT,
-                                RPC_TIMEOUT);
-    } catch (InterruptedException e) {
-      throw new SliderException(SliderExitCodes.EXIT_TIMED_OUT,
-          e,
-          "Interrupted waiting for communications with the Application Master");
-    }
-  }
-
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[38/50] [abbrv] hadoop git commit: YARN-6544. Add Null check RegistryDNS service while parsing registry records. Contributed by Karam Singh

Posted by ji...@apache.org.
YARN-6544. Add Null check RegistryDNS service while parsing registry records. Contributed by Karam Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/427835d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/427835d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/427835d7

Branch: refs/heads/yarn-native-services
Commit: 427835d7748c77cd2c52bb355b8f4f0e8488c8a4
Parents: 7acc577
Author: Gour Saha <go...@apache.org>
Authored: Fri May 12 15:04:17 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../hadoop/registry/server/dns/RegistryDNS.java | 31 +++++++++++++-------
 .../registry/server/dns/TestRegistryDNS.java    | 29 ++++++++++++++++++
 2 files changed, 49 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/427835d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 126795a..9ffc9db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -1393,19 +1393,28 @@ public class RegistryDNS extends AbstractService implements DNSOperations,
       throws IOException {
     ServiceRecordProcessor processor;
     try {
-      if (record.get(YarnRegistryAttributes.YARN_PERSISTENCE)
-          .equals(CONTAINER)) {
-        // container registration.  the logic to identify and create the
-        // container entry needs to be enhanced/more accurate and associate to
-        // correct host
-        processor =
-            new ContainerServiceRecordProcessor(record, path, domainName, this);
+      String yarnPersistanceValue = record.get(
+                                    YarnRegistryAttributes.YARN_PERSISTENCE);
+      if (yarnPersistanceValue != null) {
+        if (yarnPersistanceValue.equals(CONTAINER)) {
+          // container registration.  the logic to identify and create the
+          // container entry needs to be enhanced/more accurate and associate
+          // to correct host
+          processor =
+               new ContainerServiceRecordProcessor(record, path, domainName,
+                   this);
+        } else {
+          LOG.debug("Creating ApplicationServiceRecordProcessor for {}",
+                    yarnPersistanceValue);
+          processor =
+               new ApplicationServiceRecordProcessor(record, path, domainName,
+                   this);
+        }
+        processor.manageDNSRecords(command);
       } else {
-        processor =
-            new ApplicationServiceRecordProcessor(record, path, domainName,
-                this);
+        LOG.warn("Yarn Resgistry record {} does not contain {} attribute ",
+                  record.toString(), YarnRegistryAttributes.YARN_PERSISTENCE);
       }
-      processor.manageDNSRecords(command);
     } catch (Exception e) {
       throw new IOException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/427835d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
index d58b1c8..fcb602c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -129,6 +129,16 @@ public class TestRegistryDNS extends Assert {
       + "  \"yarn:persistence\" : \"container\"\n"
       + "}\n";
 
+  private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n"
+      + "  \"type\" : \"JSONServiceRecord\",\n"
+      + "  \"description\" : \"YCLOUD\",\n"
+      + "  \"external\" : [ ],\n"
+      + "  \"internal\" : [ ],\n"
+      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
+      + "  \"yarn:ip\" : \"172.17.0.19\",\n"
+      + "  \"yarn:hostname\" : \"0a134d6329bb\"\n"
+      + "}\n";
+
   @Before
   public void initialize() throws Exception {
     setRegistryDNS(new RegistryDNS("TestRegistry"));
@@ -220,6 +230,25 @@ public class TestRegistryDNS extends Assert {
   }
 
   @Test
+  public void testContainerRegistrationPersistanceAbsent() throws Exception {
+    ServiceRecord record = marshal.fromBytes("somepath",
+        CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT.getBytes());
+    registryDNS.register(
+        "/registry/users/root/services/org-apache-slider/test1/components/"
+            + "container-e50-1451931954322-0016-01-000003",
+         record);
+
+    Name name =
+        Name.fromString("ctr-e50-1451931954322-0016-01-000002.hwx.test.");
+    Record question = Record.newRecord(name, Type.A, DClass.IN);
+    Message query = Message.newQuery(question);
+    byte[] responseBytes = registryDNS.generateReply(query, null);
+    Message response = new Message(responseBytes);
+    assertEquals("Excepting NXDOMAIN as Record must not have regsisterd wrong",
+        Rcode.NXDOMAIN, response.getRcode());
+  }
+
+  @Test
   public void testRecordTTL() throws Exception {
     ServiceRecord record = getMarshal().fromBytes("somepath",
         CONTAINER_RECORD.getBytes());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
index 29eb5ca..9f3ebba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
@@ -18,18 +18,6 @@
 
 package org.apache.slider.server.appmaster.timelineservice;
 
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -54,6 +42,18 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 /**
  * Test class for ServiceTimelinePublisher.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java
new file mode 100644
index 0000000..bc3cbbe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.rest.registry;
+
+import org.apache.hadoop.registry.client.binding.JsonSerDeser;
+
+class PathEntryMarshalling
+    extends JsonSerDeser<PathEntryResource> {
+  public PathEntryMarshalling() {
+    super(PathEntryResource.class);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java
new file mode 100644
index 0000000..2216479
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.web.rest.registry;
+
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * This test exists because Jersey appears to behave "oddly"
+ * when it comes to marshalling JSON, and some of the REST resources
+ * appear to have trouble.
+ *
+ * This test tries to isolate it
+ */
+public class TestRegistryRestMarshalling {
+
+  @Test
+  public void testDeser() throws Throwable {
+    PathEntryMarshalling pem = new PathEntryMarshalling();
+    PathEntryResource unmarshalled = pem.fromResource(
+        "/org/apache/slider/server/appmaster/web/rest/registry/sample.json");
+
+    ServiceRecord serviceRecord = unmarshalled.service;
+    assertNotNull(serviceRecord);
+    assertNotNull(serviceRecord.get(YarnRegistryAttributes.YARN_ID));
+    assertNotEquals("", serviceRecord.get(YarnRegistryAttributes
+        .YARN_PERSISTENCE));
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
new file mode 100644
index 0000000..e4e344e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockProviderService;
+import org.apache.slider.server.appmaster.state.ProviderAppState;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.WebAppApiImpl;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+/**
+ * Test cluster specification block.
+ */
+public class TestClusterSpecificationBlock extends BaseMockAppStateTest {
+
+  private ClusterSpecificationBlock clusterSpecBlock;
+
+  @Before
+  public void setup() throws Exception {
+    super.setup();
+    ProviderAppState providerAppState = new ProviderAppState(
+        "undefined",
+        appState);
+    ProviderService providerService = new MockProviderService();
+
+    WebAppApiImpl inst = new WebAppApiImpl(
+        providerAppState,
+        providerService,
+        null,
+        null, null);
+
+    Injector injector = Guice.createInjector(new AbstractModule() {
+          @Override
+          protected void configure() {
+            bind(WebAppApi.class).toInstance(inst);
+          }
+        });
+
+    clusterSpecBlock = injector.getInstance(ClusterSpecificationBlock.class);
+  }
+
+  @Test
+  public void testJsonGeneration() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+
+    int level = hamlet.nestLevel();
+    clusterSpecBlock.doRender(hamlet);
+
+    assertEquals(level, hamlet.nestLevel());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
new file mode 100644
index 0000000..92f8559
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
+import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EImp;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockContainerId;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.model.mock.MockProviderService;
+import org.apache.slider.server.appmaster.model.mock.MockResource;
+import org.apache.slider.server.appmaster.state.ProviderAppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.WebAppApiImpl;
+import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.ClusterNodeNameComparator;
+import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.TableAnchorContent;
+import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.TableContent;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test container stats block.
+ */
+public class TestContainerStatsBlock extends BaseMockAppStateTest {
+
+  private ContainerStatsBlock statsBlock;
+
+  private Container cont1, cont2;
+
+  @Before
+  public void setup() throws Exception {
+    super.setup();
+    ProviderService providerService = new MockProviderService();
+    ProviderAppState providerAppState = new ProviderAppState(
+        "undefined",
+        appState);
+
+    WebAppApiImpl inst = new WebAppApiImpl(
+        providerAppState,
+        providerService,
+        null,
+        METRICS, null);
+
+    Injector injector = Guice.createInjector(new WebappModule(inst));
+    statsBlock = injector.getInstance(ContainerStatsBlock.class);
+
+    cont1 = new MockContainer();
+
+    cont1.setId(mockContainerId(0));
+    cont1.setNodeId(new MockNodeId());
+    cont1.setPriority(Priority.newInstance(1));
+    cont1.setResource(new MockResource(0, 0));
+
+    cont2 = new MockContainer();
+    cont2.setId(mockContainerId(1));
+    cont2.setNodeId(new MockNodeId());
+    cont2.setPriority(Priority.newInstance(1));
+    cont2.setResource(new MockResource(0, 0));
+  }
+
+  private static class WebappModule extends AbstractModule {
+    private final WebAppApiImpl instance;
+
+    WebappModule(WebAppApiImpl instance) {
+      this.instance = instance;
+    }
+
+    @Override
+    protected void configure() {
+      bind(WebAppApi.class).toInstance(instance);
+    }
+  }
+
+
+  public MockContainerId mockContainerId(int count) {
+    return new MockContainerId(applicationAttemptId, count);
+  }
+
+  @Test
+  public void testGetContainerInstances() {
+    List<RoleInstance> roles = Arrays.asList(
+        new RoleInstance(cont1),
+        new RoleInstance(cont2)
+    );
+    Map<String, RoleInstance> map = statsBlock.getContainerInstances(roles);
+
+    assertEquals(2, map.size());
+
+    assertTrue(map.containsKey("mockcontainer_0"));
+    assertEquals(map.get("mockcontainer_0"), roles.get(0));
+
+    assertTrue(map.containsKey("mockcontainer_1"));
+    assertEquals(map.get("mockcontainer_1"), roles.get(1));
+  }
+
+  @Test
+  public void testGenerateRoleDetailsWithTwoColumns() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+
+    // Make a div to put the content into
+    DIV<Hamlet> div = hamlet.div();
+
+    String detailsName = "testing";
+    String selector = "selector";
+    Map<TableContent, String> data = new HashMap<>();
+    data.put(new ContainerStatsBlock.TableContent("Foo"), "bar");
+
+    int levelPrior = hamlet.nestLevel();
+    statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet());
+
+    // Close out the div we made
+    // DIV<Hamlet>._() will actually invoke the wrong method (creating <p>),
+    // explicit cast to make sure we're closing out the <div>
+    ((EImp) div)._();
+
+    assertEquals(levelPrior, hamlet.nestLevel());
+  }
+
+  @Test
+  public void testGenerateRoleDetailsWithOneColumn() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+    DIV<Hamlet> div = hamlet.div();
+
+    String detailsName = "testing";
+    String selector = "selector";
+    Map<TableContent, String> data = new HashMap<>();
+    data.put(new ContainerStatsBlock.TableContent("Bar"), null);
+
+    int levelPrior = hamlet.nestLevel();
+    statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet());
+
+    // Close out the div we made
+    // DIV<Hamlet>._() will actually invoke the wrong method (creating <p>),
+    // explicit cast to make sure we're closing out the <div>
+    ((EImp) div)._();
+
+    assertEquals(levelPrior, hamlet.nestLevel());
+  }
+
+  @Test
+  public void testGenerateRoleDetailsWithNoData() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+    DIV<Hamlet> div = hamlet.div();
+
+    String detailsName = "testing";
+    String selector = "selector";
+    Map<TableContent, String> data = new HashMap<>();
+
+    int levelPrior = hamlet.nestLevel();
+    statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet());
+
+    // Close out the div we made
+    // DIV<Hamlet>._() will actually invoke the wrong method (creating <p>),
+    // explicit cast to make sure we're closing out the <div>
+    ((EImp) div)._();
+
+    assertEquals(levelPrior, hamlet.nestLevel());
+  }
+
+  @Test
+  public void testClusterNodeNameComparator() {
+    ClusterNode n1 = new ClusterNode(mockContainerId(1)),
+        n2 = new ClusterNode(mockContainerId(2)),
+        n3 = new ClusterNode(mockContainerId(3));
+
+    List<ClusterNode> nodes = new ArrayList<ClusterNode>();
+    nodes.add(n2);
+    nodes.add(n3);
+    nodes.add(n1);
+
+    Collections.sort(nodes, new ClusterNodeNameComparator());
+
+    String prevName = "";
+    for (ClusterNode node : nodes) {
+      assertTrue(prevName.compareTo(node.name) <= 0);
+      prevName = node.name;
+    }
+  }
+
+  @Test
+  public void testTableContent() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+    TableContent tc = new TableContent("foo");
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+    TR<TABLE<Hamlet>> tr = hamlet.table().tr();
+
+    int prevLevel = hamlet.nestLevel();
+    // printCell should not end the tr
+    tc.printCell(tr);
+    tr._();
+    assertEquals(prevLevel, hamlet.nestLevel());
+  }
+
+  @Test
+  public void testTableAnchorContent() {
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+    TableContent tc = new TableAnchorContent("foo", "http://bar.com");
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+    TR<TABLE<Hamlet>> tr = hamlet.table().tr();
+
+    int prevLevel = hamlet.nestLevel();
+    // printCell should not end the tr
+    tc.printCell(tr);
+    tr._();
+    assertEquals(prevLevel, hamlet.nestLevel());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
new file mode 100644
index 0000000..96eb3d7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.view;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.model.appstate.BaseMockAppStateAATest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockContainerId;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.model.mock.MockProviderService;
+import org.apache.slider.server.appmaster.model.mock.MockResource;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.server.appmaster.state.ProviderAppState;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.WebAppApiImpl;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+/**
+ * Test index block.
+ */
+public class TestIndexBlock extends BaseMockAppStateAATest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestIndexBlock.class);
+
+  private IndexBlock indexBlock;
+
+  private Container cont1, cont2;
+
+  @Before
+  public void setup() throws Exception {
+    super.setup();
+    assertNotNull(appState);
+    ProviderService providerService = new MockProviderService();
+    ProviderAppState providerAppState = new ProviderAppState(
+        "undefined",
+        appState);
+
+    WebAppApiImpl inst = new WebAppApiImpl(
+        providerAppState,
+        providerService,
+        null,
+        METRICS, null);
+
+    Injector injector = Guice.createInjector(new AbstractModule() {
+          @Override
+          protected void configure() {
+            bind(WebAppApi.class).toInstance(inst);
+          }
+        });
+
+    indexBlock = injector.getInstance(IndexBlock.class);
+
+    cont1 = new MockContainer();
+    cont1.setId(new MockContainerId(applicationAttemptId, 0));
+    cont1.setNodeId(new MockNodeId());
+    cont1.setPriority(Priority.newInstance(1));
+    cont1.setResource(new MockResource(0, 0));
+
+    cont2 = new MockContainer();
+    cont2.setId(new MockContainerId(applicationAttemptId, 1));
+    cont2.setNodeId(new MockNodeId());
+    cont2.setPriority(Priority.newInstance(1));
+    cont2.setResource(new MockResource(0, 0));
+  }
+
+  @Test
+  public void testIndex() {
+    RoleStatus role0 = getRole0Status();
+    RoleStatus role1 = getRole1Status();
+    RoleStatus role2 = getRole2Status();
+
+    int role0Desired = 8;
+
+    role0.setDesired(role0Desired);
+    int role0Actual = 5;
+    int role0Requested = role0Desired - role0Actual;
+    for (int i = 0; i < role0Actual; i++) {
+      appState.incRunningContainers(role0);
+    }
+    assertEquals(role0.getRunning(), role0Actual);
+    for (int i = 0; i < role0Requested; i++) {
+      appState.incRequestedContainers(role0);
+    }
+    assertEquals(role0.getRequested(), role0Requested);
+
+    int role0Failures = 2;
+
+    appState.incFailedContainers(role0, ContainerOutcome.Failed);
+    appState.incFailedContainers(role0, ContainerOutcome.Failed);
+
+    RoleStatus aaRole = getAaRole();
+    // all aa roles fields are in the
+    int aaroleDesired = 200;
+    aaRole.setDesired(aaroleDesired);
+    int aaroleActual = 90;
+    int aaroleActive = 1;
+    int aaroleRequested = aaroleDesired - aaroleActual;
+    int aarolePending = aaroleRequested - 1;
+    int aaroleFailures = 0;
+    for (int i = 0; i < aaroleActual; i++) {
+      appState.incRunningContainers(aaRole);
+    }
+    assertEquals(aaRole.getRunning(), aaroleActual);
+    aaRole.setOutstandingAArequest(new OutstandingRequest(2, ""));
+    // add a requested
+    appState.incRequestedContainers(aaRole);
+    aaRole.getComponentMetrics().pendingAAContainers.set(aarolePending);
+    assertEquals(aaRole.getAAPending(), aarolePending);
+
+    assertEquals(aaRole.getActualAndRequested(), aaroleActual + 1);
+    StringWriter sw = new StringWriter(64);
+    PrintWriter pw = new PrintWriter(sw);
+
+    Hamlet hamlet = new Hamlet(pw, 0, false);
+
+    indexBlock.doIndex(hamlet, "accumulo");
+
+    String body = sw.toString();
+    LOG.info(body);
+    // verify role data came out
+    assertTrue(body.contains("role0"));
+    assertContains(role0Desired, body);
+    assertContains(role0Actual, body);
+    assertContains(role0Requested, body);
+    assertContains(role0Failures, body);
+
+    assertTrue(body.contains("role1"));
+    assertTrue(body.contains("role2"));
+
+    assertContains(aaroleDesired, body);
+    assertContains(aaroleActual, body);
+//    assertContains(aaroleRequested, body)
+    assertContains(aaroleFailures, body);
+    assertTrue(body.contains(indexBlock.buildAADetails(true, aarolePending)));
+
+    // verify that the sorting took place
+    assertTrue(body.indexOf("role0") < body.indexOf("role1"));
+    assertTrue(body.indexOf("role1") < body.indexOf("role2"));
+
+    assertFalse(body.contains(IndexBlock.ALL_CONTAINERS_ALLOCATED));
+    // role
+  }
+
+  void assertContains(int ex, String html) {
+    assertStringContains(Integer.toString(ex), html);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java
new file mode 100644
index 0000000..da8366f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.management;
+
+import org.apache.slider.server.appmaster.management.LongGauge;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+/**
+ * Test gauges.
+ */
+public class TestGauges extends SliderTestBase {
+
+  @Test
+  public void testLongGaugeOperations() throws Throwable {
+    LongGauge gauge = new LongGauge();
+    assertEquals(0, gauge.get());
+    gauge.inc();
+    assertEquals(1, gauge.get());
+    gauge.inc();
+    assertEquals(2, gauge.get());
+    gauge.inc();
+    assertEquals(3, gauge.get());
+    assertEquals(gauge.getValue().longValue(), gauge.get());
+    assertEquals(gauge.getCount().longValue(), gauge.get());
+
+    gauge.dec();
+    assertEquals(2, gauge.get());
+    assertEquals(1, gauge.decToFloor(1));
+    assertEquals(1, gauge.get());
+    assertEquals(0, gauge.decToFloor(1));
+    assertEquals(0, gauge.decToFloor(1));
+    assertEquals(0, gauge.decToFloor(0));
+
+    gauge.set(4);
+    assertEquals(0, gauge.decToFloor(8));
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
index a93ec57..458d1bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java
@@ -17,8 +17,8 @@
 
 package org.apache.slider.server.servicemonitor;
 
-import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestPortProbe extends Assert {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
deleted file mode 100644
index 7eaaefe..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
+++ /dev/null
@@ -1,901 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.slider.test;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.internal.AssumptionViolatedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.EOFException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Properties;
-import java.util.UUID;
-
-/**
- * Utilities used across test cases to make assertions about filesystems
- * -assertions which fail with useful information.
- * This is lifted from Hadoop common Test; that JAR isn't published, so
- * we have to make do.
- */
-public class ContractTestUtils extends Assert {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContractTestUtils.class);
-
-  public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
-
-  // For scale testing, we can repeatedly write small chunk data to generate
-  // a large file.
-  public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
-  public static final int DEFAULT_IO_CHUNK_BUFFER_SIZE = 128;
-  public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
-  public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
-
-  /**
-   * Assert that a property in the property set matches the expected value
-   * @param props property set
-   * @param key property name
-   * @param expected expected value. If null, the property must not be in the set
-   */
-  public static void assertPropertyEquals(Properties props,
-                                          String key,
-                                          String expected) {
-    String val = props.getProperty(key);
-    if (expected == null) {
-      assertNull("Non null property " + key + " = " + val, val);
-    } else {
-      assertEquals("property " + key + " = " + val,
-                          expected,
-                          val);
-    }
-  }
-
-  /**
-   *
-   * Write a file and read it in, validating the result. Optional flags control
-   * whether file overwrite operations should be enabled, and whether the
-   * file should be deleted afterwards.
-   *
-   * If there is a mismatch between what was written and what was expected,
-   * a small range of bytes either side of the first error are logged to aid
-   * diagnosing what problem occurred -whether it was a previous file
-   * or a corrupting of the current file. This assumes that two
-   * sequential runs to the same path use datasets with different character
-   * moduli.
-   *
-   * @param fs filesystem
-   * @param path path to write to
-   * @param len length of data
-   * @param overwrite should the create option allow overwrites?
-   * @param delete should the file be deleted afterwards? -with a verification
-   * that it worked. Deletion is not attempted if an assertion has failed
-   * earlier -it is not in a <code>finally{}</code> block.
-   * @throws IOException IO problems
-   */
-  public static void writeAndRead(FileSystem fs,
-                                  Path path,
-                                  byte[] src,
-                                  int len,
-                                  int blocksize,
-                                  boolean overwrite,
-                                  boolean delete) throws IOException {
-    fs.mkdirs(path.getParent());
-
-    writeDataset(fs, path, src, len, blocksize, overwrite);
-
-    byte[] dest = readDataset(fs, path, len);
-
-    compareByteArrays(src, dest, len);
-
-    if (delete) {
-      rejectRootOperation(path);
-      boolean deleted = fs.delete(path, false);
-      assertTrue("Deleted", deleted);
-      assertPathDoesNotExist(fs, "Cleanup failed", path);
-    }
-  }
-
-  /**
-   * Write a file.
-   * Optional flags control
-   * whether file overwrite operations should be enabled
-   * @param fs filesystem
-   * @param path path to write to
-   * @param len length of data
-   * @param overwrite should the create option allow overwrites?
-   * @throws IOException IO problems
-   */
-  public static void writeDataset(FileSystem fs,
-                                   Path path,
-                                   byte[] src,
-                                   int len,
-                                   int buffersize,
-                                   boolean overwrite) throws IOException {
-    assertTrue(
-      "Not enough data in source array to write " + len + " bytes",
-      src.length >= len);
-    FSDataOutputStream out = fs.create(path,
-                                       overwrite,
-                                       fs.getConf()
-                                         .getInt(IO_FILE_BUFFER_SIZE,
-                                                 4096),
-                                       (short) 1,
-                                       buffersize);
-    out.write(src, 0, len);
-    out.close();
-    assertFileHasLength(fs, path, len);
-  }
-
-  /**
-   * Read the file and convert to a byte dataset.
-   * This implements readfully internally, so that it will read
-   * in the file without ever having to seek()
-   * @param fs filesystem
-   * @param path path to read from
-   * @param len length of data to read
-   * @return the bytes
-   * @throws IOException IO problems
-   */
-  public static byte[] readDataset(FileSystem fs, Path path, int len)
-      throws IOException {
-    FSDataInputStream in = fs.open(path);
-    byte[] dest = new byte[len];
-    int offset =0;
-    int nread = 0;
-    try {
-      while (nread < len) {
-        int nbytes = in.read(dest, offset + nread, len - nread);
-        if (nbytes < 0) {
-          throw new EOFException("End of file reached before reading fully.");
-        }
-        nread += nbytes;
-      }
-    } finally {
-      in.close();
-    }
-    return dest;
-  }
-
-  /**
-   * Read a file, verify its length and contents match the expected array
-   * @param fs filesystem
-   * @param path path to file
-   * @param original original dataset
-   * @throws IOException IO Problems
-   */
-  public static void verifyFileContents(FileSystem fs,
-                                        Path path,
-                                        byte[] original) throws IOException {
-    FileStatus stat = fs.getFileStatus(path);
-    String statText = stat.toString();
-    assertTrue("not a file " + statText, stat.isFile());
-    assertEquals("wrong length " + statText, original.length, stat.getLen());
-    byte[] bytes = readDataset(fs, path, original.length);
-    compareByteArrays(original,bytes,original.length);
-  }
-
-  /**
-   * Verify that the read at a specific offset in a stream
-   * matches that expected
-   * @param stm stream
-   * @param fileContents original file contents
-   * @param seekOff seek offset
-   * @param toRead number of bytes to read
-   * @throws IOException IO problems
-   */
-  public static void verifyRead(FSDataInputStream stm, byte[] fileContents,
-                                int seekOff, int toRead) throws IOException {
-    byte[] out = new byte[toRead];
-    stm.seek(seekOff);
-    stm.readFully(out);
-    byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
-                                         seekOff + toRead);
-    compareByteArrays(expected, out,toRead);
-  }
-
-  /**
-   * Assert that tthe array original[0..len] and received[] are equal.
-   * A failure triggers the logging of the bytes near where the first
-   * difference surfaces.
-   * @param original source data
-   * @param received actual
-   * @param len length of bytes to compare
-   */
-  public static void compareByteArrays(byte[] original,
-                                       byte[] received,
-                                       int len) {
-    assertEquals("Number of bytes read != number written",
-                        len, received.length);
-    int errors = 0;
-    int first_error_byte = -1;
-    for (int i = 0; i < len; i++) {
-      if (original[i] != received[i]) {
-        if (errors == 0) {
-          first_error_byte = i;
-        }
-        errors++;
-      }
-    }
-
-    if (errors > 0) {
-      String message = String.format(" %d errors in file of length %d",
-                                     errors, len);
-      LOG.warn(message);
-      // the range either side of the first error to print
-      // this is a purely arbitrary number, to aid user debugging
-      final int overlap = 10;
-      for (int i = Math.max(0, first_error_byte - overlap);
-           i < Math.min(first_error_byte + overlap, len);
-           i++) {
-        byte actual = received[i];
-        byte expected = original[i];
-        String letter = toChar(actual);
-        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
-        if (expected != actual) {
-          line = String.format("[%04d] %2x %s -expected %2x %s\n",
-                               i,
-                               actual,
-                               letter,
-                               expected,
-                               toChar(expected));
-        }
-        LOG.warn(line);
-      }
-      fail(message);
-    }
-  }
-
-  /**
-   * Convert a byte to a character for printing. If the
-   * byte value is < 32 -and hence unprintable- the byte is
-   * returned as a two digit hex value
-   * @param b byte
-   * @return the printable character string
-   */
-  public static String toChar(byte b) {
-    if (b >= 0x20) {
-      return Character.toString((char) b);
-    } else {
-      return String.format("%02x", b);
-    }
-  }
-
-  /**
-   * Convert a buffer to a string, character by character
-   * @param buffer input bytes
-   * @return a string conversion
-   */
-  public static String toChar(byte[] buffer) {
-    StringBuilder builder = new StringBuilder(buffer.length);
-    for (byte b : buffer) {
-      builder.append(toChar(b));
-    }
-    return builder.toString();
-  }
-
-  public static byte[] toAsciiByteArray(String s) {
-    char[] chars = s.toCharArray();
-    int len = chars.length;
-    byte[] buffer = new byte[len];
-    for (int i = 0; i < len; i++) {
-      buffer[i] = (byte) (chars[i] & 0xff);
-    }
-    return buffer;
-  }
-
-  /**
-   * Cleanup at the end of a test run
-   * @param action action triggering the operation (for use in logging)
-   * @param fileSystem filesystem to work with. May be null
-   * @param cleanupPath path to delete as a string
-   */
-  public static void cleanup(String action,
-                             FileSystem fileSystem,
-                             String cleanupPath) {
-    if (fileSystem == null) {
-      return;
-    }
-    Path path = new Path(cleanupPath).makeQualified(fileSystem.getUri(),
-        fileSystem.getWorkingDirectory());
-    cleanup(action, fileSystem, path);
-  }
-
-  /**
-   * Cleanup at the end of a test run
-   * @param action action triggering the operation (for use in logging)
-   * @param fileSystem filesystem to work with. May be null
-   * @param path path to delete
-   */
-  public static void cleanup(String action, FileSystem fileSystem, Path path) {
-    noteAction(action);
-    try {
-      rm(fileSystem, path, true, false);
-    } catch (Exception e) {
-      LOG.error("Error deleting in "+ action + " - "  + path + ": " + e, e);
-    }
-  }
-
-  /**
-   * Delete a directory. There's a safety check for operations against the
-   * root directory -these are intercepted and rejected with an IOException
-   * unless the allowRootDelete flag is true
-   * @param fileSystem filesystem to work with. May be null
-   * @param path path to delete
-   * @param recursive flag to enable recursive delete
-   * @param allowRootDelete can the root directory be deleted?
-   * @throws IOException on any problem.
-   */
-  public static boolean rm(FileSystem fileSystem,
-      Path path,
-      boolean recursive,
-      boolean allowRootDelete) throws
-      IOException {
-    if (fileSystem != null) {
-      rejectRootOperation(path, allowRootDelete);
-      if (fileSystem.exists(path)) {
-        return fileSystem.delete(path, recursive);
-      }
-    }
-    return false;
-
-  }
-
-  /**
-   * Block any operation on the root path. This is a safety check
-   * @param path path in the filesystem
-   * @param allowRootOperation can the root directory be manipulated?
-   * @throws IOException if the operation was rejected
-   */
-  public static void rejectRootOperation(Path path,
-      boolean allowRootOperation) throws IOException {
-    if (path.isRoot() && !allowRootOperation) {
-      throw new IOException("Root directory operation rejected: " + path);
-    }
-  }
-
-  /**
-   * Block any operation on the root path. This is a safety check
-   * @param path path in the filesystem
-   * @throws IOException if the operation was rejected
-   */
-  public static void rejectRootOperation(Path path) throws IOException {
-    rejectRootOperation(path, false);
-  }
-
-
-  public static void noteAction(String action) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("==============  "+ action +" =============");
-    }
-  }
-
-  /**
-   * downgrade a failure to a message and a warning, then an
-   * exception for the Junit test runner to mark as failed
-   * @param message text message
-   * @param failure what failed
-   * @throws AssumptionViolatedException always
-   */
-  public static void downgrade(String message, Throwable failure) {
-    LOG.warn("Downgrading test " + message, failure);
-    AssumptionViolatedException ave =
-      new AssumptionViolatedException(failure, null);
-    throw ave;
-  }
-
-  /**
-   * report an overridden test as unsupported
-   * @param message message to use in the text
-   * @throws AssumptionViolatedException always
-   */
-  public static void unsupported(String message) {
-    skip(message);
-  }
-
-  /**
-   * report a test has been skipped for some reason
-   * @param message message to use in the text
-   * @throws AssumptionViolatedException always
-   */
-  public static void skip(String message) {
-    LOG.info("Skipping: {}", message);
-    throw new AssumptionViolatedException(message);
-  }
-
-  /**
-   * Fail with an exception that was received
-   * @param text text to use in the exception
-   * @param thrown a (possibly null) throwable to init the cause with
-   * @throws AssertionError with the text and throwable -always
-   */
-  public static void fail(String text, Throwable thrown) {
-    AssertionError e = new AssertionError(text);
-    e.initCause(thrown);
-    throw e;
-  }
-
-  /**
-   * Make an assertion about the length of a file
-   * @param fs filesystem
-   * @param path path of the file
-   * @param expected expected length
-   * @throws IOException on File IO problems
-   */
-  public static void assertFileHasLength(FileSystem fs, Path path,
-                                         int expected) throws IOException {
-    FileStatus status = fs.getFileStatus(path);
-    assertEquals(
-      "Wrong file length of file " + path + " status: " + status,
-      expected,
-      status.getLen());
-  }
-
-  /**
-   * Assert that a path refers to a directory
-   * @param fs filesystem
-   * @param path path of the directory
-   * @throws IOException on File IO problems
-   */
-  public static void assertIsDirectory(FileSystem fs,
-                                       Path path) throws IOException {
-    FileStatus fileStatus = fs.getFileStatus(path);
-    assertIsDirectory(fileStatus);
-  }
-
-  /**
-   * Assert that a path refers to a directory
-   * @param fileStatus stats to check
-   */
-  public static void assertIsDirectory(FileStatus fileStatus) {
-    assertTrue("Should be a directory -but isn't: " + fileStatus,
-               fileStatus.isDirectory());
-  }
-
-  /**
-   * Write the text to a file, returning the converted byte array
-   * for use in validating the round trip
-   * @param fs filesystem
-   * @param path path of file
-   * @param text text to write
-   * @param overwrite should the operation overwrite any existing file?
-   * @return the read bytes
-   * @throws IOException on IO problems
-   */
-  public static byte[] writeTextFile(FileSystem fs,
-                                   Path path,
-                                   String text,
-                                   boolean overwrite) throws IOException {
-    byte[] bytes = new byte[0];
-    if (text != null) {
-      bytes = toAsciiByteArray(text);
-    }
-    createFile(fs, path, overwrite, bytes);
-    return bytes;
-  }
-
-  /**
-   * Create a file
-   * @param fs filesystem
-   * @param path       path to write
-   * @param overwrite overwrite flag
-   * @param data source dataset. Can be null
-   * @throws IOException on any problem
-   */
-  public static void createFile(FileSystem fs,
-                                 Path path,
-                                 boolean overwrite,
-                                 byte[] data) throws IOException {
-    FSDataOutputStream stream = fs.create(path, overwrite);
-    if (data != null && data.length > 0) {
-      stream.write(data);
-    }
-    stream.close();
-  }
-
-  /**
-   * Touch a file
-   * @param fs filesystem
-   * @param path path
-   * @throws IOException IO problems
-   */
-  public static void touch(FileSystem fs,
-                           Path path) throws IOException {
-    createFile(fs, path, true, null);
-  }
-
-  /**
-   * Delete a file/dir and assert that delete() returned true
-   * <i>and</i> that the path no longer exists. This variant rejects
-   * all operations on root directories
-   * @param fs filesystem
-   * @param file path to delete
-   * @param recursive flag to enable recursive delete
-   * @throws IOException IO problems
-   */
-  public static void assertDeleted(FileSystem fs,
-                                   Path file,
-                                   boolean recursive) throws IOException {
-    assertDeleted(fs, file, recursive, false);
-  }
-
-  /**
-   * Delete a file/dir and assert that delete() returned true
-   * <i>and</i> that the path no longer exists. This variant rejects
-   * all operations on root directories
-   * @param fs filesystem
-   * @param file path to delete
-   * @param recursive flag to enable recursive delete
-   * @param allowRootOperations can the root dir be deleted?
-   * @throws IOException IO problems
-   */
-  public static void assertDeleted(FileSystem fs,
-      Path file,
-      boolean recursive,
-      boolean allowRootOperations) throws IOException {
-    rejectRootOperation(file, allowRootOperations);
-    assertPathExists(fs, "about to be deleted file", file);
-    boolean deleted = fs.delete(file, recursive);
-    String dir = ls(fs, file.getParent());
-    assertTrue("Delete failed on " + file + ": " + dir, deleted);
-    assertPathDoesNotExist(fs, "Deleted file", file);
-  }
-
-  /**
-   * Read in "length" bytes, convert to an ascii string
-   * @param fs filesystem
-   * @param path path to read
-   * @param length #of bytes to read.
-   * @return the bytes read and converted to a string
-   * @throws IOException IO problems
-   */
-  public static String readBytesToString(FileSystem fs,
-                                  Path path,
-                                  int length) throws IOException {
-    FSDataInputStream in = fs.open(path);
-    try {
-      byte[] buf = new byte[length];
-      in.readFully(0, buf);
-      return toChar(buf);
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * Take an array of filestats and convert to a string (prefixed with a [01] counter)
-   * @param stats array of stats
-   * @param separator separator after every entry
-   * @return a stringified set
-   */
-  public static String fileStatsToString(FileStatus[] stats, String separator) {
-    StringBuilder buf = new StringBuilder(stats.length * 128);
-    for (int i = 0; i < stats.length; i++) {
-      buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
-    }
-    return buf.toString();
-  }
-
-  /**
-   * List a directory
-   * @param fileSystem FS
-   * @param path path
-   * @return a directory listing or failure message
-   * @throws IOException
-   */
-  public static String ls(FileSystem fileSystem, Path path) throws IOException {
-    if (path == null) {
-      //surfaces when someone calls getParent() on something at the top of the path
-      return "/";
-    }
-    FileStatus[] stats;
-    String pathtext = "ls " + path;
-    try {
-      stats = fileSystem.listStatus(path);
-    } catch (FileNotFoundException e) {
-      return pathtext + " -file not found";
-    } catch (IOException e) {
-      return pathtext + " -failed: " + e;
-    }
-    return dumpStats(pathtext, stats);
-  }
-
-  public static String dumpStats(String pathname, FileStatus[] stats) {
-    return pathname + fileStatsToString(stats, "\n");
-  }
-
-   /**
-   * Assert that a file exists and its {@link FileStatus} entry
-   * declares that this is a file and not a symlink or directory.
-   * @param fileSystem filesystem to resolve path against
-   * @param filename name of the file
-   * @throws IOException IO problems during file operations
-   */
-  public static void assertIsFile(FileSystem fileSystem, Path filename) throws
-                                                                 IOException {
-    assertPathExists(fileSystem, "Expected file", filename);
-    FileStatus status = fileSystem.getFileStatus(filename);
-    assertIsFile(filename, status);
-  }
-
-  /**
-   * Assert that a file exists and its {@link FileStatus} entry
-   * declares that this is a file and not a symlink or directory.
-   * @param filename name of the file
-   * @param status file status
-   */
-  public static void assertIsFile(Path filename, FileStatus status) {
-    String fileInfo = filename + "  " + status;
-    assertFalse("File claims to be a directory " + fileInfo,
-                status.isDirectory());
-    assertFalse("File claims to be a symlink " + fileInfo,
-                       status.isSymlink());
-  }
-
-  /**
-   * Create a dataset for use in the tests; all data is in the range
-   * base to (base+modulo-1) inclusive
-   * @param len length of data
-   * @param base base of the data
-   * @param modulo the modulo
-   * @return the newly generated dataset
-   */
-  public static byte[] dataset(int len, int base, int modulo) {
-    byte[] dataset = new byte[len];
-    for (int i = 0; i < len; i++) {
-      dataset[i] = (byte) (base + (i % modulo));
-    }
-    return dataset;
-  }
-
-  /**
-   * Assert that a path exists -but make no assertions as to the
-   * type of that entry
-   *
-   * @param fileSystem filesystem to examine
-   * @param message message to include in the assertion failure message
-   * @param path path in the filesystem
-   * @throws FileNotFoundException raised if the path is missing
-   * @throws IOException IO problems
-   */
-  public static void assertPathExists(FileSystem fileSystem, String message,
-                               Path path) throws IOException {
-    if (!fileSystem.exists(path)) {
-      //failure, report it
-      String listing = ls(fileSystem, path.getParent());
-      throw new FileNotFoundException(message + ": not found " + path
-        + " in \"" + path.getParent() + "\" :\n" + listing);
-    }
-  }
-
-  /**
-   * Assert that a path does not exist
-   *
-   * @param fileSystem filesystem to examine
-   * @param message message to include in the assertion failure message
-   * @param path path in the filesystem
-   * @throws IOException IO problems
-   */
-  public static void assertPathDoesNotExist(FileSystem fileSystem,
-                                            String message,
-                                            Path path) throws IOException {
-    try {
-      FileStatus status = fileSystem.getFileStatus(path);
-      fail(message + ": unexpectedly found " + path + " as  " + status);
-    } catch (FileNotFoundException expected) {
-      //this is expected
-
-    }
-  }
-
-  /**
-   * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry
-   * @param fs filesystem
-   * @param dir directory to scan
-   * @param subdir full path to look for
-   * @throws IOException IO problems
-   */
-  public static void assertListStatusFinds(FileSystem fs,
-                                           Path dir,
-                                           Path subdir) throws IOException {
-    FileStatus[] stats = fs.listStatus(dir);
-    boolean found = false;
-    StringBuilder builder = new StringBuilder();
-    for (FileStatus stat : stats) {
-      builder.append(stat.toString()).append('\n');
-      if (stat.getPath().equals(subdir)) {
-        found = true;
-      }
-    }
-    assertTrue("Path " + subdir
-                      + " not found in directory " + dir + ":" + builder,
-                      found);
-  }
-
-  /**
-   * Test for the host being an OSX machine
-   * @return true if the JVM thinks that is running on OSX
-   */
-  public static boolean isOSX() {
-    return System.getProperty("os.name").contains("OS X");
-  }
-
-  /**
-   * compare content of file operations using a double byte array
-   * @param concat concatenated files
-   * @param bytes bytes
-   */
-  public static void validateFileContent(byte[] concat, byte[][] bytes) {
-    int idx = 0;
-    boolean mismatch = false;
-
-    for (byte[] bb : bytes) {
-      for (byte b : bb) {
-        if (b != concat[idx++]) {
-          mismatch = true;
-          break;
-        }
-      }
-      if (mismatch)
-        break;
-    }
-    assertFalse("File content of file is not as expected at offset " + idx,
-                mismatch);
-  }
-
-  /**
-   * Receives test data from the given input file and checks the size of the
-   * data as well as the pattern inside the received data.
-   *
-   * @param fs FileSystem
-   * @param path Input file to be checked
-   * @param expectedSize the expected size of the data to be read from the
-   *        input file in bytes
-   * @param bufferLen Pattern length
-   * @param modulus   Pattern modulus
-   * @throws IOException
-   *         thrown if an error occurs while reading the data
-   */
-  public static void verifyReceivedData(FileSystem fs, Path path,
-                                      final long expectedSize,
-                                      final int bufferLen,
-                                      final int modulus) throws IOException {
-    final byte[] testBuffer = new byte[bufferLen];
-
-    long totalBytesRead = 0;
-    int nextExpectedNumber = 0;
-    final InputStream inputStream = fs.open(path);
-    try {
-      while (true) {
-        final int bytesRead = inputStream.read(testBuffer);
-        if (bytesRead < 0) {
-          break;
-        }
-
-        totalBytesRead += bytesRead;
-
-        for (int i = 0; i < bytesRead; ++i) {
-          if (testBuffer[i] != nextExpectedNumber) {
-            throw new IOException("Read number " + testBuffer[i]
-                + " but expected " + nextExpectedNumber);
-          }
-
-          ++nextExpectedNumber;
-
-          if (nextExpectedNumber == modulus) {
-            nextExpectedNumber = 0;
-          }
-        }
-      }
-
-      if (totalBytesRead != expectedSize) {
-        throw new IOException("Expected to read " + expectedSize +
-            " bytes but only received " + totalBytesRead);
-      }
-    } finally {
-      inputStream.close();
-    }
-  }
-
-  /**
-   * Generates test data of the given size according to some specific pattern
-   * and writes it to the provided output file.
-   *
-   * @param fs FileSystem
-   * @param path Test file to be generated
-   * @param size The size of the test data to be generated in bytes
-   * @param bufferLen Pattern length
-   * @param modulus   Pattern modulus
-   * @throws IOException
-   *         thrown if an error occurs while writing the data
-   */
-  public static long generateTestFile(FileSystem fs, Path path,
-                                      final long size,
-                                      final int bufferLen,
-                                      final int modulus) throws IOException {
-    final byte[] testBuffer = new byte[bufferLen];
-    for (int i = 0; i < testBuffer.length; ++i) {
-      testBuffer[i] = (byte) (i % modulus);
-    }
-
-    final OutputStream outputStream = fs.create(path, false);
-    long bytesWritten = 0;
-    try {
-      while (bytesWritten < size) {
-        final long diff = size - bytesWritten;
-        if (diff < testBuffer.length) {
-          outputStream.write(testBuffer, 0, (int) diff);
-          bytesWritten += diff;
-        } else {
-          outputStream.write(testBuffer);
-          bytesWritten += testBuffer.length;
-        }
-      }
-
-      return bytesWritten;
-    } finally {
-      outputStream.close();
-    }
-  }
-
-  /**
-   * Creates and reads a file with the given size. The test file is generated
-   * according to a specific pattern so it can be easily verified even if it's
-   * a multi-GB one.
-   * During the read phase the incoming data stream is also checked against
-   * this pattern.
-   *
-   * @param fs FileSystem
-   * @param parent Test file parent dir path
-   * @throws IOException
-   *    thrown if an I/O error occurs while writing or reading the test file
-   */
-  public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
-      throws IOException {
-    int testBufferSize = fs.getConf()
-        .getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
-    int modulus = fs.getConf()
-        .getInt(IO_CHUNK_MODULUS_SIZE, DEFAULT_IO_CHUNK_MODULUS_SIZE);
-
-    final String objectName = UUID.randomUUID().toString();
-    final Path objectPath = new Path(parent, objectName);
-
-    // Write test file in a specific pattern
-    assertEquals(fileSize,
-        generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
-    assertPathExists(fs, "not created successful", objectPath);
-
-    // Now read the same file back and verify its content
-    try {
-      verifyReceivedData(fs, objectPath, fileSize, testBufferSize, modulus);
-    } finally {
-      // Delete test file
-      fs.delete(objectPath, false);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/tools/TestUtility.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/tools/TestUtility.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/tools/TestUtility.java
deleted file mode 100644
index 78ce3e7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/tools/TestUtility.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.tools;
-
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
-import org.apache.commons.compress.utils.IOUtils;
-import org.junit.Assert;
-import org.junit.rules.TemporaryFolder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-
-/**
- *  Various utility methods
- *  Byte comparison methods are from
- *  <code>org.apache.hadoop.fs.contract.ContractTestUtils</code>
- */
-public class TestUtility {
-  protected static final Logger log =
-      LoggerFactory.getLogger(TestUtility.class);
-
-  public static void addDir(File dirObj, ZipArchiveOutputStream zipFile, String prefix) throws IOException {
-    for (File file : dirObj.listFiles()) {
-      if (file.isDirectory()) {
-        addDir(file, zipFile, prefix + file.getName() + File.separator);
-      } else {
-        log.info("Adding to zip - " + prefix + file.getName());
-        zipFile.putArchiveEntry(new ZipArchiveEntry(prefix + file.getName()));
-        IOUtils.copy(new FileInputStream(file), zipFile);
-        zipFile.closeArchiveEntry();
-      }
-    }
-  }
-
-  public static void zipDir(String zipFile, String dir) throws IOException {
-    File dirObj = new File(dir);
-    ZipArchiveOutputStream out = new ZipArchiveOutputStream(new FileOutputStream(zipFile));
-    log.info("Creating : {}", zipFile);
-    try {
-      addDir(dirObj, out, "");
-    } finally {
-      out.close();
-    }
-  }
-
-  public static String createAppPackage(
-      TemporaryFolder folder, String subDir, String pkgName, String srcPath) throws IOException {
-    String zipFileName;
-    File pkgPath = folder.newFolder(subDir);
-    File zipFile = new File(pkgPath, pkgName).getAbsoluteFile();
-    zipFileName = zipFile.getAbsolutePath();
-    TestUtility.zipDir(zipFileName, srcPath);
-    log.info("Created temporary zip file at {}", zipFileName);
-    return zipFileName;
-  }
-
-
-  /**
-   * Assert that the array original[0..len] and received[] are equal.
-   * A failure triggers the logging of the bytes near where the first
-   * difference surfaces.
-   * @param original source data
-   * @param received actual
-   * @param len length of bytes to compare
-   */
-  public static void compareByteArrays(byte[] original,
-      byte[] received,
-      int len) {
-    Assert.assertEquals("Number of bytes read != number written",
-        len, received.length);
-    int errors = 0;
-    int first_error_byte = -1;
-    for (int i = 0; i < len; i++) {
-      if (original[i] != received[i]) {
-        if (errors == 0) {
-          first_error_byte = i;
-        }
-        errors++;
-      }
-    }
-
-    if (errors > 0) {
-      String message = String.format(" %d errors in file of length %d",
-          errors, len);
-      log.warn(message);
-      // the range either side of the first error to print
-      // this is a purely arbitrary number, to aid user debugging
-      final int overlap = 10;
-      for (int i = Math.max(0, first_error_byte - overlap);
-           i < Math.min(first_error_byte + overlap, len);
-           i++) {
-        byte actual = received[i];
-        byte expected = original[i];
-        String letter = toChar(actual);
-        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
-        if (expected != actual) {
-          line = String.format("[%04d] %2x %s -expected %2x %s\n",
-              i,
-              actual,
-              letter,
-              expected,
-              toChar(expected));
-        }
-        log.warn(line);
-      }
-      Assert.fail(message);
-    }
-  }
-  /**
-   * Convert a byte to a character for printing. If the
-   * byte value is < 32 -and hence unprintable- the byte is
-   * returned as a two digit hex value
-   * @param b byte
-   * @return the printable character string
-   */
-  public static String toChar(byte b) {
-    if (b >= 0x20) {
-      return Character.toString((char) b);
-    } else {
-      return String.format("%02x", b);
-    }
-  }
-
-  /**
-   * Convert a buffer to a string, character by character
-   * @param buffer input bytes
-   * @return a string conversion
-   */
-  public static String toChar(byte[] buffer) {
-    StringBuilder builder = new StringBuilder(buffer.length);
-    for (byte b : buffer) {
-      builder.append(toChar(b));
-    }
-    return builder.toString();
-  }
-
-  public static byte[] toAsciiByteArray(String s) {
-    char[] chars = s.toCharArray();
-    int len = chars.length;
-    byte[] buffer = new byte[len];
-    for (int i = 0; i < len; i++) {
-      buffer[i] = (byte) (chars[i] & 0xff);
-    }
-    return buffer;
-  }
-
-  /**
-   * Create a dataset for use in the tests; all data is in the range
-   * base to (base+modulo-1) inclusive
-   * @param len length of data
-   * @param base base of the data
-   * @param modulo the modulo
-   * @return the newly generated dataset
-   */
-  public static byte[] dataset(int len, int base, int modulo) {
-    byte[] dataset = new byte[len];
-    for (int i = 0; i < len; i++) {
-      dataset[i] = (byte) (base + (i % modulo));
-    }
-    return dataset;
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: YARN-6398. Support to add native-service specific details in new YARN UI. Contributed by Akhil PB.

Posted by ji...@apache.org.
YARN-6398. Support to add native-service specific details in new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56b6c2b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56b6c2b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56b6c2b3

Branch: refs/heads/yarn-native-services
Commit: 56b6c2b31e24e1708b124eb993efb84d109f9c58
Parents: 6d83d95
Author: Sunil G <su...@apache.org>
Authored: Sat May 6 23:48:19 2017 +0530
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../app/adapters/yarn-component-instance.js     |  32 ++++++
 .../app/adapters/yarn-service-component.js      |  32 ++++++
 .../webapp/app/adapters/yarn-service-info.js    |  32 ++++++
 .../main/webapp/app/adapters/yarn-servicedef.js |  13 +++
 .../webapp/app/components/confirm-dialog.js     |  31 ++++++
 .../main/webapp/app/components/metrics-table.js |  24 ++++
 .../app/controllers/yarn-app/components.js      |  63 +++++++++++
 .../webapp/app/controllers/yarn-app/configs.js  |  24 ++++
 .../webapp/app/controllers/yarn-app/info.js     |  60 ++++++++++
 .../app/controllers/yarn-component-instance.js  |  59 ++++++++++
 .../controllers/yarn-component-instance/info.js |  25 +++++
 .../app/controllers/yarn-component-instances.js |  59 ++++++++++
 .../yarn-component-instances/configs.js         |  25 +++++
 .../yarn-component-instances/info.js            |  62 +++++++++++
 .../webapp/app/helpers/check-availability.js    |  28 +++++
 .../app/models/yarn-component-instance.js       |  51 +++++++++
 .../webapp/app/models/yarn-service-component.js |  46 ++++++++
 .../main/webapp/app/models/yarn-service-info.js |  57 ++++++++++
 .../src/main/webapp/app/router.js               |   9 ++
 .../webapp/app/routes/yarn-app/components.js    |  49 ++++++++
 .../main/webapp/app/routes/yarn-app/configs.js  |  52 +++++++++
 .../src/main/webapp/app/routes/yarn-app/info.js |  12 +-
 .../app/routes/yarn-component-instance.js       |  29 +++++
 .../app/routes/yarn-component-instance/info.js  |  45 ++++++++
 .../app/routes/yarn-component-instances.js      |  29 +++++
 .../routes/yarn-component-instances/configs.js  |  44 ++++++++
 .../app/routes/yarn-component-instances/info.js |  53 +++++++++
 .../app/serializers/yarn-component-instance.js  |  72 ++++++++++++
 .../app/serializers/yarn-service-component.js   |  77 +++++++++++++
 .../webapp/app/serializers/yarn-service-info.js |  87 +++++++++++++++
 .../src/main/webapp/app/styles/app.css          |  14 ++-
 .../app/templates/components/confirm-dialog.hbs |  37 +++++++
 .../app/templates/components/metrics-table.hbs  |  82 ++++++++++++++
 .../src/main/webapp/app/templates/yarn-app.hbs  |   8 ++
 .../app/templates/yarn-app/components.hbs       |  23 ++++
 .../webapp/app/templates/yarn-app/configs.hbs   |  57 ++++++++++
 .../main/webapp/app/templates/yarn-app/info.hbs | 111 ++++++++++++++-----
 .../app/templates/yarn-component-instance.hbs   |  43 +++++++
 .../templates/yarn-component-instance/info.hbs  |  81 ++++++++++++++
 .../app/templates/yarn-component-instances.hbs  |  46 ++++++++
 .../yarn-component-instances/configs.hbs        |  53 +++++++++
 .../templates/yarn-component-instances/info.hbs |  28 +++++
 .../yarn-component-instances/loading.hbs        |  23 ++++
 .../src/main/webapp/config/default-config.js    |   2 +-
 .../components/confirm-dialog-test.js           |  43 +++++++
 .../components/metrics-table-test.js            |  43 +++++++
 .../adapters/yarn-component-instance-test.js    |  30 +++++
 .../adapters/yarn-service-component-test.js     |  30 +++++
 .../unit/adapters/yarn-service-info-test.js     |  30 +++++
 .../controllers/yarn-app/components-test.js     |  30 +++++
 .../unit/controllers/yarn-app/configs-test.js   |  30 +++++
 .../controllers/yarn-component-instance-test.js |  30 +++++
 .../yarn-component-instance/info-test.js        |  30 +++++
 .../yarn-component-instances-test.js            |  30 +++++
 .../yarn-component-instances/configs-test.js    |  30 +++++
 .../yarn-component-instances/info-test.js       |  30 +++++
 .../unit/helpers/check-availability-test.js     |  28 +++++
 .../unit/models/yarn-component-instance-test.js |  30 +++++
 .../unit/models/yarn-service-component-test.js  |  30 +++++
 .../tests/unit/models/yarn-service-info-test.js |  30 +++++
 .../unit/routes/yarn-app/components-test.js     |  29 +++++
 .../tests/unit/routes/yarn-app/configs-test.js  |  29 +++++
 .../unit/routes/yarn-component-instance-test.js |  29 +++++
 .../routes/yarn-component-instance/info-test.js |  29 +++++
 .../routes/yarn-component-instances-test.js     |  29 +++++
 .../yarn-component-instances/configs-test.js    |  29 +++++
 .../yarn-component-instances/info-test.js       |  29 +++++
 .../serializers/yarn-component-instance-test.js |  33 ++++++
 .../serializers/yarn-service-component-test.js  |  33 ++++++
 .../unit/serializers/yarn-service-info-test.js  |  33 ++++++
 70 files changed, 2666 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js
new file mode 100644
index 0000000..062a006
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AbstractAdapter from './abstract';
+
+export default AbstractAdapter.extend({
+  address: "timelineWebAddress",
+  restNameSpace: "timelineService",
+  serverName: "ATS",
+
+  urlForQuery(query/*, modelName*/) {
+    var url = this.buildURL();
+    url += '/' + query.appId + '/entities/COMPONENT_INSTANCE?fields=ALL';
+    delete query.appId;
+    return url;
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js
new file mode 100644
index 0000000..c356192
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AbstractAdapter from './abstract';
+
+export default AbstractAdapter.extend({
+  address: "timelineWebAddress",
+  restNameSpace: "timelineService",
+  serverName: "ATS",
+
+  urlForQuery(query/*, modelName*/) {
+    var url = this.buildURL();
+    url += '/' + query.appId + '/entities/COMPONENT?fields=ALL';
+    delete query.appId;
+    return url;
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js
new file mode 100644
index 0000000..dec3e50
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import AbstractAdapter from './abstract';

/**
 * Adapter that fetches the SERVICE_ATTEMPT entity for a service
 * application from the YARN Application Timeline Service (ATS) REST API.
 */
export default AbstractAdapter.extend({
  address: "timelineWebAddress",
  restNameSpace: "timelineService",
  serverName: "ATS",

  // Builds <base>/<appId>/entities/SERVICE_ATTEMPT?fields=ALL. The appId
  // key is removed so it is not re-appended as a query-string parameter.
  urlForQueryRecord(query/*, modelName*/) {
    var requestUrl = this.buildURL() + '/' + query.appId + '/entities/SERVICE_ATTEMPT?fields=ALL';
    delete query.appId;
    return requestUrl;
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
index c362f5e..dc5dbfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -27,5 +27,18 @@ export default RESTAbstractAdapter.extend({
   deployService(request) {
     var url = this.buildURL();
     return this.ajax(url, "POST", {data: request});
+  },
+
+  stopService(serviceName) {
+    var url = this.buildURL();
+    url += "/" + serviceName;
+    var data = {"state": "STOPPED", "name": serviceName};
+    return this.ajax(url, "PUT", {data: data});
+  },
+
+  deleteService(serviceName) {
+    var url = this.buildURL();
+    url += "/" + serviceName;
+    return this.ajax(url, "DELETE", {data: {}});
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js
new file mode 100644
index 0000000..a6f518b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Reusable modal confirmation dialog. The host template binds an action to
 * this component; confirming forwards that action to the enclosing context.
 */
export default Ember.Component.extend({
  dialogId: "confirmModalDialog",
  title: "Confirm",
  message: "Are you sure?",

  actions: {
    yesConfirmed() {
      // Fire the component's primary (default) bound action.
      this.sendAction();
    }
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js
new file mode 100644
index 0000000..62b2fc5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Presentational component that renders a table of metrics.
 */
export default Ember.Component.extend({
  // Label describing the kind of metrics being displayed.
  type: '',
  // Metrics object supplied by the caller; null until data is available.
  metrics: null
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
new file mode 100644
index 0000000..947cb98
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';
import ColumnDef from 'em-table/utils/column-definition';

/**
 * Controller for the component-groups table of a YARN service app.
 * Column links carry the service name and app id as query params.
 */
export default Ember.Controller.extend({
  queryParams: ["service"],
  service: undefined,

  tableColumns: Ember.computed('model.appId', 'model.serviceName', function() {
    var serviceName = this.get('model.serviceName');
    var applicationId = this.get('model.appId');

    var columns = [{
      id: 'name',
      headerTitle: 'Component Group',
      contentPath: 'name',
      cellComponentName: 'em-table-linked-cell',
      getCellContent: function(row) {
        var groupName = row.get('name');
        return {
          displayText: groupName,
          href: `#/yarn-component-instances/${groupName}/info?service=${serviceName}&&appid=${applicationId}`
        };
      }
    }];
    columns.push({
      id: 'vcores',
      headerTitle: 'VCores',
      contentPath: 'vcores'
    });
    columns.push({
      id: 'memory',
      headerTitle: 'Memory (MB)',
      contentPath: 'memory'
    });
    columns.push({
      id: 'instances',
      headerTitle: '# Components',
      contentPath: 'instances',
      observePath: true
    });
    columns.push({
      id: 'createdDate',
      headerTitle: 'Created Time',
      contentPath: 'createdDate'
    });

    return ColumnDef.make(columns);
  })
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js
new file mode 100644
index 0000000..a6cba9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Controller for the configurations tab of a YARN service app. Exposes
 * the `service` query parameter used to look up configuration data.
 */
export default Ember.Controller.extend({
  service: undefined,
  queryParams: ["service"]
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
index f9652f9..3de6687 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
@@ -21,6 +21,66 @@ import Ember from 'ember';
 export default Ember.Controller.extend({
   queryParams: ["service"],
   service: undefined,
+  isLoading: false,
+  actionResponse: null,
+
+  actions: {
+    showStopServiceConfirm() {
+      this.set('actionResponse', null);
+      Ember.$("#stopServiceConfirmDialog").modal('show');
+    },
+
+    stopService() {
+      var self = this;
+      Ember.$("#stopServiceConfirmDialog").modal('hide');
+      var adapter = this.store.adapterFor('yarn-servicedef');
+      self.set('isLoading', true);
+      adapter.stopService(this.get('service')).then(function() {
+        self.set('actionResponse', {msg: 'Service stopped successfully. Auto refreshing in 5 seconds.', type: 'success'});
+        Ember.run.later(self, function() {
+          this.set('actionResponse', null);
+          this.send("refresh");
+        }, 5000);
+      }, function(errr) {
+        let messg = errr.diagnostics || 'Error: Stop service failed!';
+        self.set('actionResponse', {msg: messg, type: 'error'});
+      }).finally(function() {
+        self.set('isLoading', false);
+      });
+    },
+
+    showDeleteServiceConfirm() {
+      this.set('actionResponse', null);
+      Ember.$("#deleteServiceConfirmDialog").modal('show');
+    },
+
+    deleteService() {
+      var self = this;
+      Ember.$("#deleteServiceConfirmDialog").modal('hide');
+      var adapter = this.store.adapterFor('yarn-servicedef');
+      self.set('isLoading', true);
+      adapter.deleteService(this.get('service')).then(function() {
+        self.set('actionResponse', {msg: 'Service deleted successfully. Redirecting to services in 5 seconds.', type: 'success'});
+        Ember.run.later(self, function() {
+          this.set('actionResponse', null);
+          this.transitionToRoute("yarn-services");
+        }, 5000);
+      }, function(errr) {
+        let messg = errr.diagnostics || 'Error: Delete service failed!';
+        self.set('actionResponse', {msg: messg, type: 'error'});
+      }).finally(function() {
+        self.set('isLoading', false);
+      });
+    },
+
+    resetActionResponse() {
+      this.set('actionResponse', null);
+    }
+  },
+
+  isRunningService: Ember.computed('model.serviceName', 'model.app.state', function() {
+    return this.get('service') !== undefined && this.get('model.app.state') === 'RUNNING';
+  }),
 
   amHostHttpAddressFormatted: Ember.computed('model.app.amHostHttpAddress', function() {
     var amHostAddress = this.get('model.app.amHostHttpAddress');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
new file mode 100644
index 0000000..4b8dbf4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Controller backing a single component-instance page. Holds identifying
 * context and keeps the breadcrumb trail in sync with it.
 */
export default Ember.Controller.extend({
  componentName: '',
  instanceName: '',
  serviceName: '',
  appId: '',

  // Default trail shown before any instance context is known.
  breadcrumbs: [{
    text: "Home",
    routeName: 'application'
  }, {
    text: "Services",
    routeName: 'yarn-services',
  }],

  // Rebuild the trail down to a specific instance. Deep links are only
  // appended when all four identifiers are present.
  updateBreadcrumbs(appId, serviceName, componentName, instanceName) {
    var trail = [{
      text: "Home",
      routeName: 'application'
    }, {
      text: "Services",
      routeName: 'yarn-services',
    }];
    if (appId && serviceName && componentName && instanceName) {
      trail.push(
        {
          text: `${serviceName} [${appId}]`,
          href: `#/yarn-app/${appId}/info?service=${serviceName}`
        },
        {
          text: 'Components',
          href: `#/yarn-app/${appId}/components?service=${serviceName}`
        },
        {
          text: `${componentName}`,
          href: `#/yarn-component-instances/${componentName}/info?service=${serviceName}&&appid=${appId}`
        },
        {
          text: `${instanceName}`
        }
      );
    }
    this.set('breadcrumbs', trail);
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
new file mode 100644
index 0000000..e3abcb7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Controller for the info tab of a component instance. Exposes the
 * `appid` and `service` query parameters that identify the instance.
 */
export default Ember.Controller.extend({
  appid: undefined,
  service: undefined,
  queryParams: ["appid", "service"]
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js
new file mode 100644
index 0000000..965631c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Controller for the instances listing of one component group. Maintains
 * a breadcrumb trail that callers can extend with extra tail crumbs.
 */
export default Ember.Controller.extend({
  componentName: '',
  serviceName: '',
  appId: '',

  // Default trail shown before the component context is known.
  breadcrumbs: [{
    text: "Home",
    routeName: 'application'
  }, {
    text: "Services",
    routeName: 'yarn-services',
  }],

  // Rebuild the trail down to the component group, then append any
  // caller-provided tail crumbs (e.g. the current sub-page).
  updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs) {
    var trail = [{
      text: "Home",
      routeName: 'application'
    }, {
      text: "Services",
      routeName: 'yarn-services',
    }];
    if (appId && serviceName && componentName) {
      trail.push(
        {
          text: `${serviceName} [${appId}]`,
          href: `#/yarn-app/${appId}/info?service=${serviceName}`
        },
        {
          text: 'Components',
          href: `#/yarn-app/${appId}/components?service=${serviceName}`
        },
        {
          text: `${componentName}`,
          href: `#/yarn-component-instances/${componentName}/info?service=${serviceName}&&appid=${appId}`
        }
      );
    }
    if (tailCrumbs) {
      trail.pushObjects(tailCrumbs);
    }
    this.set('breadcrumbs', trail);
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js
new file mode 100644
index 0000000..dac6498
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Controller for the configurations tab of a component group. Exposes the
 * `service` and `appid` query parameters identifying the group.
 */
export default Ember.Controller.extend({
  appid: undefined,
  service: undefined,
  queryParams: ["service", "appid"]
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
new file mode 100644
index 0000000..a676b34
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';
import ColumnDef from 'em-table/utils/column-definition';

/**
 * Controller for the instances table of a component group. Each row links
 * to the per-instance info page, carrying app id and service name.
 */
export default Ember.Controller.extend({
  queryParams: ["service", "appid"],
  appid: undefined,
  service: undefined,

  tableColumns: Ember.computed('model.appId', 'model.serviceName', function() {
    var applicationId = this.get('model.appId');
    var service = this.get('model.serviceName');

    // NOTE(review): this cell returns {text, href} while the analogous
    // column in yarn-app/components.js returns {displayText, href} —
    // confirm em-table-linked-cell accepts both shapes.
    var columns = [{
      id: 'instanceName',
      headerTitle: 'Component Name',
      contentPath: 'instanceName',
      cellComponentName: 'em-table-linked-cell',
      getCellContent: function(row) {
        var componentName = row.get('component');
        var instanceName = row.get('instanceName');
        return {
          text: instanceName,
          href: `#/yarn-component-instance/${componentName}/instances/${instanceName}/info?appid=${applicationId}&&service=${service}`
        };
      }
    }];
    columns.push({
      id: 'containerId',
      headerTitle: 'Current Container Id',
      contentPath: 'containerId',
      minWidth: '350px'
    });
    columns.push({
      id: 'state',
      headerTitle: 'State',
      contentPath: 'state'
    });
    columns.push({
      id: 'startedDate',
      headerTitle: 'Started Time',
      contentPath: 'startedDate'
    });

    return ColumnDef.make(columns);
  })
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js
new file mode 100644
index 0000000..4470d65
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import Ember from 'ember';

/**
 * Template helper: returns the first positional argument when it holds a
 * usable value, or the literal string 'N/A' when it is undefined, null,
 * or the empty string.
 */
export function checkAvailability(params/*, hash*/) {
  var value = params[0];
  var unavailable = value === undefined || value === null || value === '';
  return unavailable ? 'N/A' : value;
}

export default Ember.Helper.helper(checkAvailability);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js
new file mode 100644
index 0000000..532fc55
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import DS from 'ember-data';
import Ember from 'ember';
import Converter from 'yarn-ui/utils/converter';

// Render a timestamp via Converter, or 'N/A' when unset (<= 0).
function formatTimestamp(timestamp) {
  return timestamp > 0 ? Converter.timeStampToDate(timestamp) : 'N/A';
}

/**
 * Model for a single instance of a service component, including its
 * container, placement, and lifecycle timestamps.
 */
export default DS.Model.extend({
  containerId: DS.attr('string'),
  component: DS.attr('string'),
  instanceName: DS.attr('string'),
  state: DS.attr('number'),
  createdTimestamp: DS.attr('number'),
  startedTimestamp: DS.attr('number'),
  host: DS.attr('string'),
  node: DS.attr('string'),
  hostUrl: DS.attr('string'),
  ipAddr: DS.attr('string'),
  exitStatusCode: DS.attr('string'),

  // Human-readable creation time, or 'N/A' when never set.
  createdDate: Ember.computed('createdTimestamp', function() {
    return formatTimestamp(this.get('createdTimestamp'));
  }),

  // Human-readable start time, or 'N/A' when never started.
  startedDate: Ember.computed('startedTimestamp', function() {
    return formatTimestamp(this.get('startedTimestamp'));
  })
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js
new file mode 100644
index 0000000..9e06419
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
import DS from 'ember-data';
import Ember from 'ember';
import Converter from 'yarn-ui/utils/converter';

/**
 * Model for a component group of a YARN service: name, requested
 * resources, instance count, plus free-form configs and metrics.
 */
export default DS.Model.extend({
  name: DS.attr('string'),
  vcores: DS.attr('string'),
  memory: DS.attr('string'),
  priority: DS.attr('string'),
  instances: DS.attr('string'),
  createdTimestamp: DS.attr('number'),

  // Fresh mutable defaults per record so state is never shared.
  configs: DS.attr({defaultValue: function() {
    return Ember.A();
  }}),

  metrics: DS.attr({defaultValue: function() {
    return Ember.Object.create();
  }}),

  // Human-readable creation time, or 'N/A' when never set.
  createdDate: Ember.computed('createdTimestamp', function() {
    var ts = this.get('createdTimestamp');
    return ts > 0 ? Converter.timeStampToDate(ts) : 'N/A';
  })
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js
new file mode 100644
index 0000000..7b961e8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+import Ember from 'ember';
+import Converter from 'yarn-ui/utils/converter';
+
+// Ember Data model holding service-level info (identity, state, quicklinks,
+// configs, metrics) for a YARN native-service application.
+export default DS.Model.extend({
+  name: DS.attr('string'),
+  appId: DS.attr('string'),
+  state: DS.attr('string'),
+  // Timestamps are presumably epoch milliseconds — TODO confirm against the
+  // timeline service payload.
+  createdTimestamp: DS.attr('number'),
+  launchTimestamp: DS.attr('number'),
+
+  // Array of {name, value} quicklink objects; empty Ember array by default.
+  quicklinks: DS.attr({defaultValue: function() {
+    return Ember.A();
+  }}),
+
+  // Array of {name, value} config objects; empty Ember array by default.
+  configs: DS.attr({defaultValue: function() {
+    return Ember.A();
+  }}),
+
+  // Plain Ember object keyed by metric id; empty object by default.
+  metrics: DS.attr({defaultValue: function() {
+    return Ember.Object.create();
+  }}),
+
+  // Formatted creation date, or 'N/A' when the timestamp is unset.
+  createdDate: Ember.computed('createdTimestamp', function() {
+    var timestamp = this.get('createdTimestamp');
+    if (timestamp > 0) {
+      return Converter.timeStampToDate(timestamp);
+    }
+    return 'N/A';
+  }),
+
+  // Formatted launch date, or 'N/A' when the timestamp is unset.
+  launchDate: Ember.computed('launchTimestamp', function() {
+    var timestamp = this.get('launchTimestamp');
+    if (timestamp > 0) {
+      return Converter.timeStampToDate(timestamp);
+    }
+    return 'N/A';
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
index 9b3424b..c7b3d6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
@@ -53,7 +53,16 @@ Router.map(function() {
   this.route('yarn-app', function() {
     this.route('info', {path: '/:app_id/info'});
     this.route('attempts', {path: '/:app_id/attempts'});
+    this.route('components', {path: '/:app_id/components'});
     this.route('charts', {path: '/:app_id/charts'});
+    this.route('configs', {path: '/:app_id/configs'});
+  });
+  this.route('yarn-component-instances', function() {
+    this.route('info', {path: '/:component_name/info'});
+    this.route('configs', {path: '/:component_name/configs'});
+  });
+  this.route('yarn-component-instance', function() {
+    this.route('info', {path: '/:component_name/instances/:instance_name/info'});
   });
   this.route('yarn-app-attempt', { path: '/yarn-app-attempt/:app_attempt_id'});
   this.route('error');

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js
new file mode 100644
index 0000000..8f6f40f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+// Route for the "Components" tab of a service app: loads all components and
+// all component instances, then annotates each component with its instance
+// count.
+export default AbstractRoute.extend({
+  model(param, transition) {
+    // NOTE(review): param.service is not declared in this route's path
+    // (/:app_id/components) — confirm where it is supplied from.
+    transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: 'Components'}]);
+    return Ember.RSVP.hash({
+      appId: param.app_id,
+      serviceName: param.service,
+      // Fall back to empty lists so the page still renders if either
+      // timeline query fails.
+      components: this.store.query('yarn-service-component', {appId: param.app_id, type: 'COMPONENT'}).catch(function() {
+        return [];
+      }),
+      instances: this.store.query('yarn-component-instance', {appId: param.app_id}).catch(function() {
+        return [];
+      })
+    });
+  },
+
+  // Count instances per component and store the number on the component
+  // record (overwrites the serializer's 'N/A' placeholder).
+  afterModel(model) {
+    let instances = model.instances;
+    model.components.forEach(function(component) {
+      var num = instances.filterBy('component', component.get('name')).length;
+      component.set('instances', num);
+    });
+  },
+
+  // Invoked by AbstractRoute to evict stale records before a reload.
+  unloadAll() {
+    this.store.unloadAll('yarn-service-component');
+    this.store.unloadAll('yarn-component-instance');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js
new file mode 100644
index 0000000..7502481
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+export default AbstractRoute.extend({
+  model(param, transition) {
+    transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: "Configurations & Metrics"}]);
+    return Ember.RSVP.hash({
+      appId: param.app_id,
+      serviceName: param.service,
+
+      configs: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) {
+        if (info && info.get('configs')) {
+          return info.get('configs');
+        }
+        return [];
+      }, function() {
+        return [];
+      }),
+
+      metrics: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) {
+        if (info && info.get('metrics')) {
+          return info.get('metrics');
+        }
+        return null;
+      }, function() {
+        return null;
+      })
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-service-info');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
index 4a4b19e..7585476 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js
@@ -26,12 +26,22 @@ export default AbstractRoute.extend(AppAttemptMixin, {
     return Ember.RSVP.hash({
       appId: param.app_id,
       serviceName: param.service,
-      app: this.fetchAppInfoFromRMorATS(param.app_id, this.store)
+      app: this.fetchAppInfoFromRMorATS(param.app_id, this.store),
+
+      quicklinks: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) {
+        if (info && info.get('quicklinks')) {
+          return info.get('quicklinks');
+        }
+        return [];
+      }, function() {
+        return [];
+      })
     });
   },
 
   unloadAll() {
     this.store.unloadAll('yarn-app');
     this.store.unloadAll('yarn-app-timeline');
+    this.store.unloadAll('yarn-service-info');
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js
new file mode 100644
index 0000000..681eed5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AbstractRoute from './abstract';
+
+// Parent route for a single component instance; relays breadcrumb updates
+// from child routes to the yarn-component-instance controller.
+export default AbstractRoute.extend({
+  actions: {
+    // Stashes the identifying names on the controller, then asks it to
+    // rebuild the breadcrumb trail.
+    updateBreadcrumbs(appId, serviceName, componentName, instanceName) {
+      var controller = this.controllerFor('yarn-component-instance');
+      controller.setProperties({appId: appId, serviceName: serviceName, componentName: componentName, instanceName: instanceName});
+      controller.updateBreadcrumbs(appId, serviceName, componentName, instanceName);
+    }
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
new file mode 100644
index 0000000..3753c75
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+export default AbstractRoute.extend({
+  model(params, transition) {
+    var instanceName = params.instance_name;
+    transition.send('updateBreadcrumbs', params.appid, params.service, params.component_name, instanceName);
+    return Ember.RSVP.hash({
+      appId: params.appid,
+      serviceName: params.service,
+      componentName: params.component_name,
+      instanceName: instanceName,
+      container: this.store.query('yarn-component-instance', {appId: params.appid}).then(function(instances) {
+        if (instances && instances.findBy('instanceName', instanceName)) {
+          return instances.findBy('instanceName', instanceName);
+        }
+        return null;
+      }, function() {
+        return null;
+      }),
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-component-instance');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js
new file mode 100644
index 0000000..0190911
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AbstractRoute from './abstract';
+
+// Parent route for the per-component instance list; relays breadcrumb
+// updates from child routes to the yarn-component-instances controller.
+export default AbstractRoute.extend({
+  actions: {
+    // Stashes the identifying names on the controller, then asks it to
+    // rebuild the breadcrumb trail (tailCrumbs appends extra crumbs).
+    updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs) {
+      var controller = this.controllerFor('yarn-component-instances');
+      controller.setProperties({appId: appId, componentName: componentName, serviceName: serviceName});
+      controller.updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs);
+    }
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js
new file mode 100644
index 0000000..a2540fe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+export default AbstractRoute.extend({
+  model(params, transition) {
+    var componentName = params.component_name;
+    transition.send('updateBreadcrumbs', params.appid, params.service, componentName, [{text: 'Configurations'}]);
+    return Ember.RSVP.hash({
+      appId: params.appid,
+      serviceName: params.service,
+      componentName: componentName,
+      configs: this.store.query('yarn-service-component', {appId: params.appid}).then(function(components) {
+        if (components && components.findBy('name', componentName)) {
+          return components.findBy('name', componentName).get('configs');
+        }
+        return [];
+      }, function() {
+        return [];
+      })
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-service-component');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js
new file mode 100644
index 0000000..83fd420
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import AbstractRoute from '../abstract';
+
+export default AbstractRoute.extend({
+  model(params, transition) {
+    var componentName = params.component_name;
+    transition.send('updateBreadcrumbs', params.appid, params.service, componentName);
+    return Ember.RSVP.hash({
+      appId: params.appid,
+      serviceName: params.service,
+      componentName: componentName,
+      instances: this.store.query('yarn-component-instance', {appId: params.appid}).then(function(instances) {
+        if (instances && instances.filterBy('component', componentName)) {
+          return instances.filterBy('component', componentName);
+        }
+        return [];
+      }, function() {
+        return [];
+      }),
+      metrics: this.store.query('yarn-service-component', {appId: params.appid}).then(function(components) {
+        if (components && components.findBy('name', componentName)) {
+          return components.findBy('name', componentName).get('metrics');
+        }
+        return null;
+      }, function() {
+        return null;
+      })
+    });
+  },
+
+  unloadAll() {
+    this.store.unloadAll('yarn-service-component');
+    this.store.unloadAll('yarn-component-instance');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
new file mode 100644
index 0000000..82eb273
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+// Serializer turning raw timeline container entities into
+// yarn-component-instance records.
+export default DS.JSONAPISerializer.extend({
+  // Convert one raw container entity into a JSON:API resource object.
+  internalNormalizeSingleResponse(store, primaryModelClass, payload) {
+    var info = payload.info;
+
+    var fixedPayload = {
+      id: 'yarn_component_instance_' + payload.id,
+      type: primaryModelClass.modelName,
+      attributes: {
+        containerId: payload.id,
+        component: info.COMPONENT_NAME,
+        // Synthetic name: component name plus the 1-based per-component
+        // sequence number assigned in normalizeArrayResponse.
+        instanceName: info.COMPONENT_NAME + '_' + payload.instanceId,
+        state: info.STATE,
+        createdTimestamp: payload.createdtime,
+        startedTimestamp: info.LAUNCH_TIME,
+        host: info.HOSTNAME,
+        node: info.BARE_HOST,
+        hostUrl: 'N/A',
+        ipAddr: info.IP,
+        exitStatusCode: info.EXIT_STATUS_CODE
+      }
+    };
+
+    return fixedPayload;
+  },
+
+  // Sort containers by creation time so per-component instance numbering is
+  // stable, assign each container a 1-based instanceId, then normalize each.
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+    var normalizedResponse = {data: []};
+    var instanceUid = {};
+
+    if (payload && Array.isArray(payload)) {
+      this.sortPayloadByCreatedTimeAscending(payload);
+
+      payload.forEach(function(container) {
+        let componentName = container.info.COMPONENT_NAME;
+        if (!instanceUid[componentName]) {
+          instanceUid[componentName] = 0;
+        }
+        container.instanceId = ++instanceUid[componentName];
+        var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, container);
+        normalizedResponse.data.push(pl);
+      }.bind(this));
+    }
+
+    return normalizedResponse;
+  },
+
+  // In-place ascending sort on the raw entities' createdtime field.
+  sortPayloadByCreatedTimeAscending(payload) {
+    payload.sort(function(inst1, inst2) {
+      return inst1.createdtime - inst2.createdtime;
+    });
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
new file mode 100644
index 0000000..b0261fc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+import Ember from 'ember';
+
+// Serializer turning raw timeline component entities into
+// yarn-service-component records.
+export default DS.JSONAPISerializer.extend({
+  // Convert one raw component entity into a JSON:API resource, flattening
+  // its configs map into {name, value} objects and its metrics list into a
+  // single Ember object keyed by metric id.
+  internalNormalizeSingleResponse(store, primaryModelClass, payload) {
+    var info = payload.info;
+    var configs = payload.configs;
+    var metrics = payload.metrics;
+    var newConfigs = Ember.A();
+    var newMetrics = Ember.Object.create();
+
+    if (configs) {
+      for (let conf in configs) {
+        let confObj = Ember.Object.create({
+          name: conf,
+          value: configs[conf] || 'N/A'
+        });
+        newConfigs.push(confObj);
+      }
+    }
+
+    if (metrics) {
+      metrics.forEach(function(metric) {
+        // Each metric carries a values map; only its first value is kept.
+        let val = metric.values[Object.keys(metric.values)[0]];
+        newMetrics.set(metric.id, ((val !== undefined)? val : 'N/A'));
+      });
+    }
+
+    var fixedPayload = {
+      id: 'yarn_service_component_' + payload.id,
+      type: primaryModelClass.modelName,
+      attributes: {
+        name: payload.id,
+        vcores: info.RESOURCE_CPU,
+        memory: info.RESOURCE_MEMORY,
+        // Placeholders; 'instances' is recomputed by the components route's
+        // afterModel hook.
+        priority: 'N/A',
+        instances: 'N/A',
+        createdTimestamp: payload.createdtime,
+        configs: newConfigs,
+        metrics: newMetrics
+      }
+    };
+
+    return fixedPayload;
+  },
+
+  // Normalize a list response by mapping every raw component entity through
+  // internalNormalizeSingleResponse.
+  normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+    var normalizedResponse = {data: []};
+
+    if (payload && Array.isArray(payload)) {
+      payload.forEach(function(component) {
+        var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, component);
+        normalizedResponse.data.push(pl);
+      }.bind(this));
+    }
+
+    return normalizedResponse;
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js
new file mode 100644
index 0000000..d3ee93e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+import Ember from 'ember';
+
+// Serializer turning the raw service-info timeline entity into a
+// yarn-service-info record.
+export default DS.JSONAPISerializer.extend({
+  // Convert one raw service entity into a JSON:API resource, flattening the
+  // configs and QUICK_LINKS maps into {name, value} objects and the metrics
+  // list into a single Ember object keyed by metric id.
+  internalNormalizeSingleResponse(store, primaryModelClass, payload) {
+    var info = payload.info;
+    var configs = payload.configs;
+    var quicklinks = info.QUICK_LINKS;
+    var metrics = payload.metrics;
+    var newConfigs = Ember.A();
+    var newQuicklinks = Ember.A();
+    var newMetrics = Ember.Object.create();
+
+    if (configs) {
+      for (let conf in configs) {
+        let confObj = Ember.Object.create({
+          name: conf,
+          value: configs[conf] || 'N/A'
+        });
+        newConfigs.push(confObj);
+      }
+    }
+
+    if (quicklinks) {
+      for (let link in quicklinks) {
+        let linkObj = Ember.Object.create({
+          name: link,
+          value: quicklinks[link] || 'N/A'
+        });
+        newQuicklinks.push(linkObj);
+      }
+    }
+
+    if (metrics) {
+      metrics.forEach(function(metric) {
+        // Each metric carries a values map; only its first value is kept.
+        let val = metric.values[Object.keys(metric.values)[0]];
+        newMetrics.set(metric.id, ((val !== undefined)? val : 'N/A'));
+      });
+    }
+
+    var fixedPayload = {
+      id: 'yarn_service_info_' + payload.id,
+      type: primaryModelClass.modelName,
+      attributes: {
+        name: info.NAME,
+        appId: payload.id,
+        state: info.STATE,
+        createdTimestamp: payload.createdtime,
+        launchTimestamp: info.LAUNCH_TIME,
+        quicklinks: newQuicklinks,
+        configs: newConfigs,
+        metrics: newMetrics
+      }
+    };
+
+    return fixedPayload;
+  },
+
+  // The backend returns an array even for a single record, so only the
+  // first element is normalized.
+  normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) {
+    var normalizedResponse = {data: []};
+
+    if (payload && payload[0]) {
+      var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, payload[0]);
+      normalizedResponse.data = pl;
+    }
+
+    // NOTE(review): when the payload is empty, data remains an array ([])
+    // rather than null, which is unusual for a single-record JSON:API
+    // response — confirm the store tolerates this.
+    return normalizedResponse;
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index d246f2d..d4465db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -487,7 +487,7 @@ table.table-custom-action > thead > tr > th:last-of-type, table.table-custom-act
 
 table.table-custom-bordered {
   border: 1px solid #ddd !important;
-  border-radius: 3px !important;
+  border-radius: 4px !important;
 }
 
 table.table-custom-bordered > thead > tr > th, table.table-custom-bordered > tbody > tr > td {
@@ -499,6 +499,18 @@ table.table-custom-striped > thead > tr, .table-custom-striped > tbody > tr:nth-
   background-color: #f9f9f9 !important;
 }
 
+table.table-custom-header > thead > tr > th {
+  background-color: #f5f5f5 !important;
+}
+
+table.table-radius-none {
+  border-radius: 0 !important;
+}
+
+table.table-border-none {
+  border: none !important;
+}
+
 .deploy-service label.required:after, .deploy-service-modal label.required:after {
   content: '*';
   color: #d9534f;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs
new file mode 100644
index 0000000..b3bc49a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs
@@ -0,0 +1,37 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="modal fade" tabindex="-1" role="dialog" id="{{dialogId}}">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content" style="width: 500px;">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close">
+          <span aria-hidden="true">&times;</span>
+        </button>
+        <h3 class="modal-title">{{title}}</h3>
+      </div>
+      <div class="modal-body">
+        {{message}}
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">No</button>
+        <button type="button" class="btn btn-primary" {{action "yesConfirmed"}}>Yes</button>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs
new file mode 100644
index 0000000..6e4e990
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs
@@ -0,0 +1,82 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{#if metrics}}
+  <div class="row">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <div class="panel-title">{{type}} Metrics: Success Information</div>
+      </div>
+      <div class="">
+        <table class="table table-hover table-custom-bordered table-custom-striped table-radius-none table-border-none">
+          <thead>
+            <tr>
+              <th>Desired Containers</th>
+              <th>Running Containers</th>
+              <th>Completed Containers</th>
+              <th>Pending Containers</th>
+              <th>Surplus Containers</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <td>{{metrics.ContainersDesired}}</td>
+              <td>{{metrics.ContainersRunning}}</td>
+              <td>{{metrics.ContainersCompleted}}</td>
+              <td>{{metrics.ContainersPending}}</td>
+              <td>{{metrics.SurplusContainers}}</td>
+            </tr>
+          </tbody>
+        </table>
+      </div>
+    </div>
+  </div>
+  <div class="row">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <div class="panel-title">{{type}} Metrics: Failure Information</div>
+      </div>
+      <div class="">
+        <table class="table table-hover table-custom-bordered table-custom-striped table-radius-none table-border-none">
+          <thead>
+            <tr>
+              <th>Failed Containers</th>
+              <th>Containers Failed Since Last Threshold</th>
+              <th>Preempted Containers</th>
+              <th>Pending Anti-Affinity Containers</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <td>{{metrics.ContainersFailed}}</td>
+              <td>{{metrics.FailedSinceLastThreshold}}</td>
+              <td>{{metrics.ContainersPreempted}}</td>
+              <td>{{metrics.PendingAAContainers}}</td>
+            </tr>
+          </tbody>
+        </table>
+      </div>
+    </div>
+  </div>
+{{else}}
+  <div class="row">
+    <div class="panel panel-default">
+      <h4 class="text-center">No {{type}} metrics available!</h4>
+    </div>
+  </div>
+{{/if}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index 2fb5ab3..570011c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -42,6 +42,14 @@
               {{#link-to 'yarn-app.charts' tagName="li" class=(if (eq target.currentPath 'yarn-app.charts') "active")}}
                 {{#link-to 'yarn-app.charts' appId (query-params service=serviceName)}}Resource Usage{{/link-to}}
               {{/link-to}}
+              {{#if serviceName}}
+                {{#link-to 'yarn-app.components' tagName="li" class=(if (eq target.currentPath 'yarn-app.components') "active")}}
+                  {{#link-to 'yarn-app.components' appId (query-params service=serviceName)}}Components{{/link-to}}
+                {{/link-to}}
+                {{#link-to 'yarn-app.configs' tagName="li" class=(if (eq target.currentPath 'yarn-app.configs') "active")}}
+                  {{#link-to 'yarn-app.configs' appId (query-params service=serviceName)}}Configurations &amp; Metrics{{/link-to}}
+                {{/link-to}}
+              {{/if}}
             </ul>
           </ul>
         </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs
new file mode 100644
index 0000000..39e6257
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs
@@ -0,0 +1,23 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  <div class="col-md-12">
+    {{em-table columns=tableColumns rows=model.components}}
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
new file mode 100644
index 0000000..ae1e603
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs
@@ -0,0 +1,57 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12">
+  {{metrics-table metrics=model.metrics type="Service"}}
+</div>
+
+<div class="row">
+  {{#if model.configs}}
+    <div class="col-md-12">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          <div class="panel-title">Service Configurations</div>
+        </div>
+        <div class="">
+          <table class="table table-hover table-custom-bordered table-custom-striped table-radius-none table-border-none">
+            <thead>
+              <tr>
+                <th>Name</th>
+                <th>Value</th>
+              </tr>
+            </thead>
+            <tbody>
+              {{#each model.configs as |config|}}
+                <tr>
+                  <td>{{config.name}}</td>
+                  <td>{{config.value}}</td>
+                </tr>
+              {{/each}}
+            </tbody>
+          </table>
+        </div>
+      </div>
+    </div>
+  {{else}}
+    <div class="col-md-12">
+      <div class="panel panel-default">
+        <h4 class="text-center">No service configurations available!</h4>
+      </div>
+    </div>
+  {{/if}}
+</div>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: YARN-6446. Revisit ATSv2 integration to ensure all required information is published. Contributed by Rohith Sharma K S

Posted by ji...@apache.org.
YARN-6446. Revisit ATSv2 integration to ensure all required information is published. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbc0b975
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbc0b975
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbc0b975

Branch: refs/heads/yarn-native-services
Commit: fbc0b9755db644e57aa1ff5e3f7525e8102fc72e
Parents: 81cb532
Author: Jian He <ji...@apache.org>
Authored: Thu May 4 09:15:01 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../apache/slider/providers/ProviderService.java |  8 ++++++++
 .../providers/docker/DockerProviderService.java  | 10 ++++++++++
 .../slider/server/appmaster/SliderAppMaster.java |  4 +++-
 .../ServiceTimelinePublisher.java                |  8 +++++++-
 .../SliderTimelineMetricsConstants.java          |  2 ++
 .../TestServiceTimelinePublisher.java            | 19 +++++++++++++++----
 6 files changed, 45 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
index 7e92bfa..edd313b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
@@ -28,6 +28,7 @@ import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 
 import java.io.IOException;
@@ -57,4 +58,11 @@ public interface ProviderService extends Service {
    */
   boolean processContainerStatus(ContainerId containerId,
       ContainerStatus status);
+
+  /**
+   * Set service publisher.
+   * @param serviceTimelinePublisher service publisher.
+   */
+  void setServiceTimelinePublisher(
+      ServiceTimelinePublisher serviceTimelinePublisher);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index 93a481c..482bb27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -38,6 +38,7 @@ import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.ProviderUtils;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -57,6 +58,7 @@ public class DockerProviderService extends AbstractService
   private static final String QUICK_LINKS = "quicklinks";
   protected StateAccessForProviders amState;
   protected YarnRegistryViewForProviders yarnRegistry;
+  private ServiceTimelinePublisher serviceTimelinePublisher;
 
   protected DockerProviderService() {
     super("DockerProviderService");
@@ -126,6 +128,9 @@ public class DockerProviderService extends AbstractService
     PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
         application.getQuicklinks().entrySet());
     amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);
+    if (serviceTimelinePublisher != null) {
+      serviceTimelinePublisher.serviceAttemptUpdated(application);
+    }
   }
 
   public boolean processContainerStatus(ContainerId containerId,
@@ -155,4 +160,9 @@ public class DockerProviderService extends AbstractService
     }
     return false;
   }
+
+  @Override
+  public void setServiceTimelinePublisher(ServiceTimelinePublisher publisher) {
+    this.serviceTimelinePublisher = publisher;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 1f379ea..ffa07fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -661,11 +661,13 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         asyncRMClient.registerTimelineV2Client(timelineClient);
         timelineClient.init(getConfig());
         timelineClient.start();
-        log.info("Timeline client started.");
+        log.info("Timeline v2 client started.");
 
         serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient);
         serviceTimelinePublisher.init(getConfig());
         serviceTimelinePublisher.start();
+
+        providerService.setServiceTimelinePublisher(serviceTimelinePublisher);
         appState.setServiceTimelinePublisher(serviceTimelinePublisher);
         log.info("ServiceTimelinePublisher started.");
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
index 0e04af5..7504140 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.slider.api.resource.Application;
@@ -109,6 +108,13 @@ public class ServiceTimelinePublisher extends CompositeService {
     publishComponents(application.getComponents());
   }
 
+  public void serviceAttemptUpdated(Application application) {
+    TimelineEntity entity = createServiceAttemptEntity(application.getId());
+    entity.addInfo(SliderTimelineMetricsConstants.QUICK_LINKS,
+        application.getQuicklinks());
+    putEntity(entity);
+  }
+
   public void serviceAttemptUnregistered(AppState appState,
       ActionStopSlider stopAction) {
     long currentTimeMillis = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
index 23e059d..58d77ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
@@ -37,6 +37,8 @@ public final class SliderTimelineMetricsConstants {
 
   public static final String LAUNCH_TIME = "LAUNCH_TIME";
 
+  public static final String QUICK_LINKS = "QUICK_LINKS";
+
   public static final String LAUNCH_COMMAND = "LAUNCH_COMMAND";
 
   public static final String TOTAL_CONTAINERS = "NUMBER_OF_CONTAINERS";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0b975/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
index 9f3ebba..e4fcbe8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
@@ -23,10 +23,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
-import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
@@ -77,6 +76,8 @@ public class TestServiceTimelinePublisher {
   @Before
   public void setUp() throws Exception {
     config = new Configuration();
+    config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
     timelineClient =
         new DummyTimelineClient(ApplicationId.fromString(SERVICEID));
     serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient);
@@ -88,8 +89,12 @@ public class TestServiceTimelinePublisher {
 
   @After
   public void tearDown() throws Exception {
-    serviceTimelinePublisher.stop();
-    timelineClient.stop();
+    if (serviceTimelinePublisher != null) {
+      serviceTimelinePublisher.stop();
+    }
+    if (timelineClient != null) {
+      timelineClient.stop();
+    }
   }
 
   @Test
@@ -264,6 +269,12 @@ public class TestServiceTimelinePublisher {
     @Override
     public void putEntitiesAsync(TimelineEntity... entities)
         throws IOException, YarnException {
+      putEntities(entities);
+    }
+
+    @Override
+    public void putEntities(TimelineEntity... entities)
+        throws IOException, YarnException {
       for (TimelineEntity timelineEntity : entities) {
         TimelineEntity entity =
             lastPublishedEntities.get(timelineEntity.getIdentifier());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0536f18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0536f18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0536f18

Branch: refs/heads/yarn-native-services
Commit: c0536f18205a2d792f3d72fb0e6a07873c8e9727
Parents: e56c228
Author: Jian He <ji...@apache.org>
Authored: Sun Mar 26 21:42:14 2017 +0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:24 2017 -0700

----------------------------------------------------------------------
 .../yarn/services/api/ApplicationApi.java       |    2 -
 .../api/impl/ApplicationApiService.java         | 1477 ++----------
 .../yarn/services/utils/RestApiConstants.java   |   63 -
 .../services/utils/RestApiErrorMessages.java    |   83 -
 .../services/webapp/ApplicationApiWebApp.java   |    2 +-
 .../api/impl/TestApplicationApiService.java     |   65 +-
 .../apache/slider/api/SliderApplicationApi.java |    8 -
 .../slider/api/SliderClusterProtocol.java       |   14 +-
 .../apache/slider/api/resource/Application.java |   16 +-
 .../apache/slider/api/resource/Component.java   |   38 +-
 .../apache/slider/api/resource/ConfigFile.java  |   26 +-
 .../slider/api/resource/Configuration.java      |   30 +
 .../apache/slider/api/resource/Resource.java    |    2 +-
 .../org/apache/slider/client/SliderClient.java  | 2139 ++++--------------
 .../apache/slider/client/SliderClientAPI.java   |   71 +-
 .../slider/client/SliderYarnClientImpl.java     |  200 +-
 .../client/ipc/SliderApplicationIpcClient.java  |   11 -
 .../client/ipc/SliderClusterOperations.java     |   59 +-
 .../rest/SliderApplicationApiRestClient.java    |   23 -
 .../org/apache/slider/common/SliderKeys.java    |   10 +-
 .../apache/slider/common/SliderXmlConfKeys.java |    3 +
 .../common/params/AbstractActionArgs.java       |    2 +-
 .../AbstractClusterBuildingActionArgs.java      |  185 +-
 .../slider/common/params/ActionBuildArgs.java   |   32 -
 .../slider/common/params/ActionCreateArgs.java  |   34 +-
 .../slider/common/params/ActionFlexArgs.java    |   34 +-
 .../slider/common/params/ActionFreezeArgs.java  |    4 +-
 .../slider/common/params/ActionThawArgs.java    |    4 +-
 .../slider/common/params/ActionUpgradeArgs.java |   59 +-
 .../apache/slider/common/params/Arguments.java  |    1 +
 .../apache/slider/common/params/ClientArgs.java |   16 +-
 .../slider/common/params/SliderAMArgs.java      |    2 +-
 .../slider/common/params/SliderActions.java     |    6 +-
 .../slider/common/tools/CoreFileSystem.java     |    1 +
 .../slider/common/tools/SliderFileSystem.java   |    9 +
 .../apache/slider/common/tools/SliderUtils.java |   32 +-
 .../slider/core/launch/AppMasterLauncher.java   |  233 --
 .../slider/core/launch/LaunchedApplication.java |  108 -
 .../slider/core/launch/RunningApplication.java  |   76 -
 .../core/persist/AppDefinitionPersister.java    |  263 ---
 .../slider/core/persist/JsonSerDeser.java       |    6 +
 .../providers/AbstractClientProvider.java       |   15 +-
 .../providers/AbstractProviderService.java      |  438 ----
 .../apache/slider/providers/ProviderCore.java   |   12 -
 .../apache/slider/providers/ProviderRole.java   |   19 +-
 .../slider/providers/ProviderService.java       |  157 +-
 .../apache/slider/providers/ProviderUtils.java  |  929 +-------
 .../providers/docker/DockerClientProvider.java  |   35 +-
 .../providers/docker/DockerProviderService.java |  423 +---
 .../slideram/SliderAMClientProvider.java        |  305 ---
 .../slideram/SliderAMProviderService.java       |  185 --
 .../server/appmaster/RoleLaunchService.java     |  122 +-
 .../server/appmaster/SliderAppMaster.java       |  540 ++---
 .../appmaster/actions/ActionFlexCluster.java    |   14 +-
 .../actions/ActionRegisterServiceInstance.java  |   17 +-
 .../server/appmaster/metrics/SliderMetrics.java |   80 +
 .../ProviderNotifyingOperationHandler.java      |   63 -
 .../rpc/SliderClusterProtocolPBImpl.java        |   22 +-
 .../rpc/SliderClusterProtocolProxy.java         |   19 +-
 .../server/appmaster/rpc/SliderIPCService.java  |   55 +-
 .../security/SecurityConfiguration.java         |    1 +
 .../slider/server/appmaster/state/AppState.java | 1031 ++-------
 .../appmaster/state/AppStateBindingInfo.java    |   10 +-
 .../appmaster/state/ProviderAppState.java       |   48 +-
 .../server/appmaster/state/RoleHistory.java     |   15 +-
 .../server/appmaster/state/RoleInstance.java    |    2 +
 .../server/appmaster/state/RoleStatus.java      |  324 +--
 .../state/StateAccessForProviders.java          |   61 +-
 .../server/appmaster/web/SliderAMWebApp.java    |   15 +-
 .../slider/server/appmaster/web/WebAppApi.java  |    7 -
 .../server/appmaster/web/WebAppApiImpl.java     |   15 +-
 .../appmaster/web/rest/AMWebServices.java       |   33 +-
 .../server/appmaster/web/rest/RestPaths.java    |   23 +-
 .../ApplicationResouceContentCacheFactory.java  |   27 -
 .../rest/application/ApplicationResource.java   |  516 -----
 .../resources/AggregateModelRefresher.java      |    6 +-
 .../application/resources/AppconfRefresher.java |    5 +-
 .../resources/LiveResourcesRefresher.java       |   68 -
 .../resources/LiveStatisticsRefresher.java      |   39 -
 .../resources/ResourceSnapshotRefresher.java    |   40 -
 .../web/rest/management/ManagementResource.java |    3 +-
 .../web/view/ClusterSpecificationBlock.java     |    2 +-
 .../appmaster/web/view/ContainerStatsBlock.java |   16 +-
 .../server/appmaster/web/view/IndexBlock.java   |   71 +-
 .../servicemonitor/YarnApplicationProbe.java    |   86 -
 .../YarnRegistryViewForProviders.java           |    2 -
 .../apache/slider/util/RestApiConstants.java    |   63 +
 .../slider/util/RestApiErrorMessages.java       |   83 +
 .../org/apache/slider/util/ServiceApiUtil.java  |  203 ++
 .../src/main/proto/SliderClusterMessages.proto  |   16 +-
 .../src/main/proto/SliderClusterProtocol.proto  |   14 +-
 .../main/resources/org/apache/slider/slider.xml |    4 -
 .../core/launch/TestAppMasterLauncher.java      |  157 --
 .../TestAppMasterLauncherWithAmReset.java       |   92 -
 .../appmaster/TestServiceRecordAttributes.java  |   68 -
 95 files changed, 2027 insertions(+), 10038 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
index 0fb6402..0f4bdae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
@@ -30,8 +30,6 @@ public interface ApplicationApi {
 
   Response getApplications(String state);
 
-  Response getApplication(String appName);
-
   Response deleteApplication(String appName);
 
   Response updateApplication(String appName, Application updateAppData);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 5a4726e..b4f6a2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -17,103 +17,55 @@
 
 package org.apache.hadoop.yarn.services.api.impl;
 
-import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
-import static org.apache.hadoop.yarn.services.utils.RestApiErrorMessages.*;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
-import org.apache.commons.lang.SerializationUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.security.UserGroupInformation;
+import com.google.inject.Singleton;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.services.api.ApplicationApi;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
 import org.apache.slider.api.resource.ApplicationStatus;
-import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Component;
-import org.apache.slider.api.resource.ConfigFile;
-import org.apache.slider.api.resource.Configuration;
-import org.apache.slider.api.resource.Container;
-import org.apache.slider.api.resource.ContainerState;
-import org.apache.slider.api.resource.Resource;
-import org.apache.slider.api.OptionKeys;
-import org.apache.slider.api.ResourceKeys;
-import org.apache.slider.api.StateValues;
+import org.apache.slider.util.ServiceApiUtil;
 import org.apache.slider.client.SliderClient;
-import org.apache.slider.common.SliderExitCodes;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.params.ActionCreateArgs;
-import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
-import org.apache.slider.common.params.ActionListArgs;
-import org.apache.slider.common.params.ActionRegistryArgs;
-import org.apache.slider.common.params.ActionThawArgs;
-import org.apache.slider.common.params.ActionUpdateArgs;
-import org.apache.slider.common.params.ComponentArgsDelegate;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
 import org.apache.slider.core.buildutils.BuildHelper;
-import org.apache.slider.core.exceptions.BadClusterStateException;
-import org.apache.slider.core.exceptions.NotFoundException;
 import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
-import org.apache.slider.core.registry.docstore.ConfigFormat;
-import org.apache.slider.providers.docker.DockerKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonNull;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Singleton;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.slider.util.RestApiConstants.*;
 
 @Singleton
 @Path(APPLICATIONS_API_RESOURCE_PATH)
 @Consumes({ MediaType.APPLICATION_JSON })
 @Produces({ MediaType.APPLICATION_JSON })
-public class ApplicationApiService implements ApplicationApi {
-  private static final Logger logger = LoggerFactory
-      .getLogger(ApplicationApiService.class);
-  private static org.apache.hadoop.conf.Configuration SLIDER_CONFIG;
-  private static UserGroupInformation SLIDER_USER;
+public class ApplicationApiService {
+  private static final Logger logger =
+      LoggerFactory.getLogger(ApplicationApiService.class);
+  private static org.apache.hadoop.conf.Configuration SLIDER_CONFIG =
+      new YarnConfiguration();
   private static SliderClient SLIDER_CLIENT;
   private static Response SLIDER_VERSION;
-  private static final JsonParser JSON_PARSER = new JsonParser();
-  private static final JsonObject EMPTY_JSON_OBJECT = new JsonObject();
-  private static final ActionListArgs ACTION_LIST_ARGS = new ActionListArgs();
   private static final ActionFreezeArgs ACTION_FREEZE_ARGS = new ActionFreezeArgs();
 
   static {
@@ -122,8 +74,6 @@ public class ApplicationApiService implements ApplicationApi {
 
   // initialize all the common resources - order is important
   protected static void init() {
-    SLIDER_CONFIG = getSliderClientConfiguration();
-    SLIDER_USER = getSliderUser();
     SLIDER_CLIENT = createSliderClient();
     SLIDER_VERSION = initSliderVersion();
   }
@@ -131,8 +81,7 @@ public class ApplicationApiService implements ApplicationApi {
   @GET
   @Path("/versions/slider-version")
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
-  public Response getSliderVersion() {
+  @Produces({ MediaType.APPLICATION_JSON }) public Response getSliderVersion() {
     logger.info("GET: getSliderVersion");
     return SLIDER_VERSION;
   }
@@ -148,580 +97,45 @@ public class ApplicationApiService implements ApplicationApi {
         + "\", \"hadoop_version\": \"" + hadoopVersion + "\"}").build();
   }
 
-  @POST
-  @Consumes({ MediaType.APPLICATION_JSON })
+  @POST @Consumes({ MediaType.APPLICATION_JSON })
   @Produces({ MediaType.APPLICATION_JSON })
   public Response createApplication(Application application) {
-    logger.info("POST: createApplication for app = {}", application);
+    logger.info("POST: createApplication = {}", application);
     ApplicationStatus applicationStatus = new ApplicationStatus();
-
-    Map<String, String> compNameArtifactIdMap = new HashMap<>();
-    // post payload validation
     try {
-      validateApplicationPostPayload(application, compNameArtifactIdMap);
+      ApplicationId applicationId = SLIDER_CLIENT.actionCreate(application);
+      logger.info("Successfully created application " + application.getName()
+          + " applicationId = " + applicationId);
+      applicationStatus.setState(ApplicationState.ACCEPTED);
+      applicationStatus.setUri(
+          CONTEXT_ROOT + APPLICATIONS_API_RESOURCE_PATH + "/" + application
+              .getName());
+      return Response.status(Status.CREATED).entity(applicationStatus).build();
     } catch (IllegalArgumentException e) {
       applicationStatus.setDiagnostics(e.getMessage());
       return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
           .build();
-    }
-    String applicationId = null;
-    try {
-      applicationId = createSliderApp(application, compNameArtifactIdMap);
-      applicationStatus.setState(ApplicationState.ACCEPTED);
-    } catch (SliderException se) {
-      logger.error("Create application failed", se);
-      if (se.getExitCode() == SliderExitCodes.EXIT_APPLICATION_IN_USE) {
-        applicationStatus.setDiagnostics(ERROR_APPLICATION_IN_USE);
-        return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
-            .build();
-      } else if (se.getExitCode() == SliderExitCodes.EXIT_INSTANCE_EXISTS) {
-        applicationStatus.setDiagnostics(ERROR_APPLICATION_INSTANCE_EXISTS);
-        return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
-            .build();
-      } else {
-        applicationStatus.setDiagnostics(se.getMessage());
-      }
     } catch (Exception e) {
-      logger.error("Create application failed", e);
-      applicationStatus.setDiagnostics(e.getMessage());
-    }
-
-    if (StringUtils.isNotEmpty(applicationId)) {
-      applicationStatus.setUri(CONTEXT_ROOT + APPLICATIONS_API_RESOURCE_PATH
-          + "/" + application.getName());
-      // 202 = ACCEPTED
-      return Response.status(HTTP_STATUS_CODE_ACCEPTED)
-          .entity(applicationStatus).build();
-    } else {
+      String message = "Failed to create application " + application.getName();
+      logger.error(message, e);
+      applicationStatus.setDiagnostics(message + ": " + e.getMessage());
       return Response.status(Status.INTERNAL_SERVER_ERROR)
           .entity(applicationStatus).build();
     }
   }
 
-  @VisibleForTesting
-  protected void validateApplicationPostPayload(Application application,
-      Map<String, String> compNameArtifactIdMap) {
-    if (StringUtils.isEmpty(application.getName())) {
-      throw new IllegalArgumentException(ERROR_APPLICATION_NAME_INVALID);
-    }
-    if (!SliderUtils.isClusternameValid(application.getName())) {
-      throw new IllegalArgumentException(ERROR_APPLICATION_NAME_INVALID_FORMAT);
-    }
-
-    // If the application has no components do top-level checks
-    if (application.getComponents() == null
-        || application.getComponents().size() == 0) {
-      // artifact
-      if (application.getArtifact() == null) {
-        throw new IllegalArgumentException(ERROR_ARTIFACT_INVALID);
-      }
-      if (StringUtils.isEmpty(application.getArtifact().getId())) {
-        throw new IllegalArgumentException(ERROR_ARTIFACT_ID_INVALID);
-      }
-
-      // If artifact is of type APPLICATION, add a slider specific property
-      if (application.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
-        if (application.getConfiguration() == null) {
-          application.setConfiguration(new Configuration());
-        }
-        addPropertyToConfiguration(application.getConfiguration(),
-            SliderKeys.COMPONENT_TYPE_KEY,
-            SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
-      }
-      // resource
-      validateApplicationResource(application.getResource(), null, application
-          .getArtifact().getType());
-
-      // container size
-      if (application.getNumberOfContainers() == null) {
-        throw new IllegalArgumentException(ERROR_CONTAINERS_COUNT_INVALID);
-      }
-
-      // Since it is a simple app with no components, create a default component
-      application.setComponents(getDefaultComponentAsList(application));
-    } else {
-      // If the application has components, then run checks for each component.
-      // Let global values take effect if component level values are not
-      // provided.
-      Artifact globalArtifact = application.getArtifact();
-      Resource globalResource = application.getResource();
-      Long globalNumberOfContainers = application.getNumberOfContainers();
-      for (Component comp : application.getComponents()) {
-        // artifact
-        if (comp.getArtifact() == null) {
-          comp.setArtifact(globalArtifact);
-        }
-        // If still null raise validation exception
-        if (comp.getArtifact() == null) {
-          throw new IllegalArgumentException(String.format(
-              ERROR_ARTIFACT_FOR_COMP_INVALID, comp.getName()));
-        }
-        if (StringUtils.isEmpty(comp.getArtifact().getId())) {
-          throw new IllegalArgumentException(String.format(
-              ERROR_ARTIFACT_ID_FOR_COMP_INVALID, comp.getName()));
-        }
-
-        // If artifact is of type APPLICATION, add a slider specific property
-        if (comp.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
-          if (comp.getConfiguration() == null) {
-            comp.setConfiguration(new Configuration());
-          }
-          addPropertyToConfiguration(comp.getConfiguration(),
-              SliderKeys.COMPONENT_TYPE_KEY,
-              SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
-          compNameArtifactIdMap.put(comp.getName(), comp.getArtifact().getId());
-          comp.setName(comp.getArtifact().getId());
-        }
-
-        // resource
-        if (comp.getResource() == null) {
-          comp.setResource(globalResource);
-        }
-        validateApplicationResource(comp.getResource(), comp, comp
-            .getArtifact().getType());
-
-        // container count
-        if (comp.getNumberOfContainers() == null) {
-          comp.setNumberOfContainers(globalNumberOfContainers);
-        }
-        if (comp.getNumberOfContainers() == null) {
-          throw new IllegalArgumentException(String.format(
-              ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID, comp.getName()));
-        }
-      }
-    }
-
-    // Application lifetime if not specified, is set to unlimited lifetime
-    if (application.getLifetime() == null) {
-      application.setLifetime(DEFAULT_UNLIMITED_LIFETIME);
-    }
-  }
-
-  private void validateApplicationResource(Resource resource, Component comp,
-      Artifact.TypeEnum artifactType) {
-    // Only apps/components of type APPLICATION can skip resource requirement
-    if (resource == null && artifactType == Artifact.TypeEnum.APPLICATION) {
-      return;
-    }
-    if (resource == null) {
-      throw new IllegalArgumentException(comp == null ? ERROR_RESOURCE_INVALID
-          : String.format(ERROR_RESOURCE_FOR_COMP_INVALID, comp.getName()));
-    }
-    // One and only one of profile OR cpus & memory can be specified. Specifying
-    // both raises validation error.
-    if (StringUtils.isNotEmpty(resource.getProfile())
-        && (resource.getCpus() != null
-            || StringUtils.isNotEmpty(resource.getMemory()))) {
-      throw new IllegalArgumentException(
-          comp == null ? ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
-              : String.format(
-                  ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
-                  comp.getName()));
-    }
-    // Currently resource profile is not supported yet, so we will raise
-    // validation error if only resource profile is specified
-    if (StringUtils.isNotEmpty(resource.getProfile())) {
-      throw new IllegalArgumentException(
-          ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET);
-    }
-
-    String memory = resource.getMemory();
-    Integer cpus = resource.getCpus();
-    if (StringUtils.isEmpty(memory)) {
-      throw new IllegalArgumentException(
-          comp == null ? ERROR_RESOURCE_MEMORY_INVALID : String.format(
-              ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, comp.getName()));
-    }
-    if (cpus == null) {
-      throw new IllegalArgumentException(
-          comp == null ? ERROR_RESOURCE_CPUS_INVALID : String.format(
-              ERROR_RESOURCE_CPUS_FOR_COMP_INVALID, comp.getName()));
-    }
-    if (cpus <= 0) {
-      throw new IllegalArgumentException(
-          comp == null ? ERROR_RESOURCE_CPUS_INVALID_RANGE : String.format(
-              ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, comp.getName()));
-    }
-  }
-
-  private String createSliderApp(Application application,
-      Map<String, String> compNameArtifactIdMap) throws IOException,
-      YarnException, InterruptedException {
-    final String appName = application.getName();
-    final String queueName = application.getQueue();
-
-    final ActionCreateArgs createArgs = new ActionCreateArgs();
-    addAppConfOptions(createArgs, application, compNameArtifactIdMap);
-    addResourceOptions(createArgs, application);
-
-    createArgs.provider = DockerKeys.PROVIDER_DOCKER;
-
-    if (queueName != null && queueName.trim().length() > 0) {
-      createArgs.queue = queueName.trim();
-    }
-    createArgs.lifetime = application.getLifetime();
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<String>() {
-      @Override
-      public String run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        sliderClient.actionCreate(appName, createArgs);
-        ApplicationId applicationId = sliderClient.applicationId;
-        if (applicationId != null) {
-          return applicationId.toString();
-          // return getApplicationIdString(applicationId);
-        }
-        return null;
-      }
-    });
-  }
-
-  private void addAppConfOptions(ActionCreateArgs createArgs,
-      Application application, Map<String, String> compNameArtifactIdMap) throws IOException {
-    List<String> appCompOptionTriples = createArgs.optionsDelegate.compOptTriples; // TODO: optionTuples instead of compOptTriples
-    logger.info("Initial appCompOptionTriples = {}",
-        Arrays.toString(appCompOptionTriples.toArray()));
-    List<String> appOptions = createArgs.optionsDelegate.optionTuples;
-    logger.info("Initial appOptions = {}",
-        Arrays.toString(appOptions.toArray()));
-    // TODO: Set Slider-AM memory and vcores here
-    //    appCompOptionTriples.addAll(Arrays.asList(SLIDER_APPMASTER_COMPONENT_NAME,
-    //        "", ""));
-
-    // Global configuration - for override purpose
-    // TODO: add it to yaml
-    Configuration globalConfig = null;
-    //    Configuration globalConfig = (Configuration) SerializationUtils
-    //        .clone(application.getConfiguration());
-
-    // TODO: Add the below into globalConfig
-    //    if (application.getConfigurations() != null) {
-    //      for (Entry<String, String> entry : application.getConfigurations()
-    //          .entrySet()) {
-    //        globalConf.addProperty(entry.getKey(), entry.getValue());
-    //      }
-    //    }
-
-    Set<String> uniqueGlobalPropertyCache = new HashSet<>();
-    if (application.getConfiguration() != null) {
-      if (application.getConfiguration().getProperties() != null) {
-        for (Map.Entry<String, String> propEntry : application
-            .getConfiguration().getProperties().entrySet()) {
-          addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-              propEntry.getKey(), propEntry.getValue());
-        }
-      }
-      List<ConfigFile> configFiles = application.getConfiguration().getFiles();
-      if (configFiles != null && !configFiles.isEmpty()) {
-        addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-            SliderKeys.AM_CONFIG_GENERATION, "true");
-        for (ConfigFile configFile : configFiles) {
-          addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-              OptionKeys.CONF_FILE_PREFIX + configFile.getSrcFile() +
-                  OptionKeys.NAME_SUFFIX, configFile.getDestFile());
-          addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-              OptionKeys.CONF_FILE_PREFIX + configFile.getSrcFile() +
-                  OptionKeys.TYPE_SUFFIX, configFile.getType().toString());
-        }
-      }
-    }
-    if (application.getComponents() != null) {
-
-      Map<String, String> appQuicklinks = application.getQuicklinks();
-      if (appQuicklinks != null) {
-        for (Map.Entry<String, String> quicklink : appQuicklinks.entrySet()) {
-          addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-              OptionKeys.EXPORT_PREFIX + quicklink.getKey(),
-              quicklink.getValue());
-        }
-      }
-
-      Map<String, String> placeholders = new HashMap<>();
-      placeholders.put(PLACEHOLDER_APP_NAME, application.getName());
-      for (Component comp : application.getComponents()) {
-        placeholders.put(PLACEHOLDER_APP_COMPONENT_NAME, comp.getName());
-        if (comp.getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
-          appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-              DockerKeys.DOCKER_IMAGE, comp.getArtifact().getId() == null ?
-              application.getArtifact().getId() : comp.getArtifact().getId()));
-          appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-              DockerKeys.DOCKER_START_COMMAND, comp.getLaunchCommand() == null ?
-              replacePlaceholders(application.getLaunchCommand(), placeholders)
-              : replacePlaceholders(comp.getLaunchCommand(), placeholders)));
-          appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-              DockerKeys.DOCKER_NETWORK, DockerKeys.DEFAULT_DOCKER_NETWORK));
-          if (comp.getRunPrivilegedContainer() != null) {
-            appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-                DockerKeys.DOCKER_USE_PRIVILEGED,
-                comp.getRunPrivilegedContainer().toString()));
-          }
-        }
-
-        if (comp.getConfiguration() != null) {
-          List<ConfigFile> configFiles = comp.getConfiguration().getFiles();
-          if (configFiles != null && !configFiles.isEmpty()) {
-            appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-                SliderKeys.AM_CONFIG_GENERATION, "true"));
-            for (ConfigFile configFile : configFiles) {
-              appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-                  OptionKeys.CONF_FILE_PREFIX + configFile.getSrcFile() +
-                      OptionKeys.NAME_SUFFIX, configFile.getDestFile()));
-              appCompOptionTriples.addAll(Arrays.asList(comp.getName(),
-                  OptionKeys.CONF_FILE_PREFIX + configFile.getSrcFile() +
-                  OptionKeys.TYPE_SUFFIX, configFile.getType().toString()));
-            }
-          }
-        }
-
-        if (Boolean.TRUE.equals(comp.getUniqueComponentSupport())) {
-          for (int i = 1; i <= comp.getNumberOfContainers(); i++) {
-            placeholders.put(PLACEHOLDER_COMPONENT_ID, Integer.toString(i));
-            appCompOptionTriples.addAll(createAppConfigComponent(
-                comp.getName() + i, comp, comp.getName() + i, globalConfig,
-                placeholders, compNameArtifactIdMap));
-          }
-        } else {
-          appCompOptionTriples.addAll(createAppConfigComponent(comp.getName(),
-              comp, comp.getName(), globalConfig, null, compNameArtifactIdMap));
-        }
-      }
-    }
-
-    logger.info("Updated appCompOptionTriples = {}",
-        Arrays.toString(appCompOptionTriples.toArray()));
-    logger.info("Updated appOptions = {}",
-        Arrays.toString(appOptions.toArray()));
-  }
-
-  private void addOptionsIfNotPresent(List<String> options,
-      Set<String> uniqueGlobalPropertyCache, String key, String value) {
-    if (uniqueGlobalPropertyCache == null) {
-      options.addAll(Arrays.asList(key, value));
-    } else if (!uniqueGlobalPropertyCache.contains(key)) {
-      options.addAll(Arrays.asList(key, value));
-      uniqueGlobalPropertyCache.add(key);
-    }
-  }
-
-  private void addPropertyToConfiguration(Configuration conf, String key,
-      String value) {
-    if (conf == null) {
-      return;
-    }
-    if (conf.getProperties() == null) {
-      conf.setProperties(new HashMap<String, String>());
-    }
-    conf.getProperties().put(key, value);
-  }
-
-  private List<String> createAppConfigComponent(String compName,
-      Component component, String configPrefix, Configuration globalConf,
-      Map<String, String> placeholders,
-      Map<String, String> compNameArtifactIdMap) {
-    List<String> appConfOptTriples = new ArrayList<>();
-
-    if (component.getConfiguration() != null
-        && component.getConfiguration().getProperties() != null) {
-      for (Map.Entry<String, String> propEntry : component.getConfiguration()
-          .getProperties().entrySet()) {
-        appConfOptTriples.addAll(Arrays.asList(compName, propEntry.getKey(),
-            replacePlaceholders(propEntry.getValue(), placeholders)));
-      }
-    }
-
-    // If artifact is of type APPLICATION, then in the POST JSON there will
-    // be no component definition for that artifact. Hence it's corresponding id
-    // field is added. Every external APPLICATION has a unique id field.
-    List<String> convertedDeps = new ArrayList<>();
-    for (String dep : component.getDependencies()) {
-      if (compNameArtifactIdMap.containsKey(dep)) {
-        convertedDeps.add(compNameArtifactIdMap.get(dep));
-      } else {
-        convertedDeps.add(dep);
-      }
-    }
-    // If the DNS dependency property is set to true for a component, it means
-    // that it is ensured that DNS entry has been added for all the containers
-    // of this component, before moving on to the next component in the DAG.
-    if (hasPropertyWithValue(component, PROPERTY_DNS_DEPENDENCY, "true")) {
-      if (component.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
-        convertedDeps.add(component.getArtifact().getId());
-      } else {
-        convertedDeps.add(compName);
-      }
-    }
-    if (convertedDeps.size() > 0) {
-      appConfOptTriples.addAll(Arrays.asList(compName, "requires",
-          StringUtils.join(convertedDeps, ",")));
-    }
-    return appConfOptTriples;
-  }
-
-  private String replacePlaceholders(String value,
-      Map<String, String> placeholders) {
-    if (StringUtils.isEmpty(value) || placeholders == null) {
-      return value;
-    }
-    for (Map.Entry<String, String> placeholder : placeholders.entrySet()) {
-      value = value.replaceAll(Pattern.quote(placeholder.getKey()),
-          placeholder.getValue());
-    }
-    return value;
-  }
-
-  private List<String> createAppConfigGlobal(Component component,
-      Configuration globalConf, Set<String> uniqueGlobalPropertyCache) {
-    List<String> appOptions = new ArrayList<>();
-    if (component.getConfiguration() != null
-        && component.getConfiguration().getProperties() != null) {
-      for (Map.Entry<String, String> propEntry : component.getConfiguration()
-          .getProperties().entrySet()) {
-        addOptionsIfNotPresent(appOptions, uniqueGlobalPropertyCache,
-            propEntry.getKey(), propEntry.getValue());
-      }
-    }
-    return appOptions;
-  }
-
-  private void addResourceOptions(ActionCreateArgs createArgs,
-      Application application) throws IOException {
-    List<String> resCompOptionTriples = createArgs.optionsDelegate.resCompOptTriples;
-    logger.info("Initial resCompOptTriples = {}",
-        Arrays.toString(resCompOptionTriples.toArray()));
-    // TODO: Add any Slider AM resource specific props here like jvm.heapsize
-    //    resCompOptionTriples.addAll(Arrays.asList(SLIDER_APPMASTER_COMPONENT_NAME,
-    //        "", ""));
-
-    // Global resource - for override purpose
-    Resource globalResource = (Resource) SerializationUtils.clone(application
-        .getResource());
-    // Priority seeded with 1, expecting every new component will increase it by
-    // 1 making it ready for the next component to use.
-    if (application.getComponents() != null) {
-      int priority = 1;
-      for (Component comp : application.getComponents()) {
-        if (hasPropertyWithValue(comp, SliderKeys.COMPONENT_TYPE_KEY,
-            SliderKeys.COMPONENT_TYPE_EXTERNAL_APP)) {
-          continue;
-        }
-        if (Boolean.TRUE.equals(comp.getUniqueComponentSupport())) {
-          for (int i = 1; i <= comp.getNumberOfContainers(); i++) {
-            resCompOptionTriples.addAll(createResourcesComponent(comp.getName()
-                + i, comp, priority, 1, globalResource));
-            priority++;
-          }
-        } else {
-          resCompOptionTriples.addAll(createResourcesComponent(comp.getName(),
-              comp, priority, comp.getNumberOfContainers(), globalResource));
-          priority++;
-        }
-      }
-    }
-
-    logger.info("Updated resCompOptTriples = {}",
-        Arrays.toString(resCompOptionTriples.toArray()));
-  }
-
-  private boolean hasPropertyWithValue(Component comp, String key, String value) {
-    if (comp == null || key == null) {
-      return false;
-    }
-    if (comp.getConfiguration() == null
-        || comp.getConfiguration().getProperties() == null) {
-      return false;
-    }
-    Map<String, String> props = comp.getConfiguration().getProperties();
-    if (props.containsKey(key)) {
-      if (value == null) {
-        return props.get(key) == null;
-      } else {
-        if (value.equals(props.get(key))) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  private List<String> createResourcesComponent(String compName,
-      Component component, int priority, long numInstances,
-      Resource globalResource) {
-    String memory = component.getResource() == null ? globalResource
-        .getMemory() : component.getResource().getMemory();
-    Integer cpus = component.getResource() == null ? globalResource.getCpus()
-        : component.getResource().getCpus();
-
-    List<String> resCompOptTriples = new ArrayList<String>();
-    resCompOptTriples.addAll(Arrays.asList(compName,
-        ResourceKeys.COMPONENT_PRIORITY, Integer.toString(priority)));
-    resCompOptTriples.addAll(Arrays.asList(compName,
-        ResourceKeys.COMPONENT_INSTANCES, Long.toString(numInstances)));
-    resCompOptTriples.addAll(Arrays.asList(compName, ResourceKeys.YARN_MEMORY,
-        memory));
-    resCompOptTriples.addAll(Arrays.asList(compName, ResourceKeys.YARN_CORES,
-        cpus.toString()));
-    if (component.getPlacementPolicy() != null) {
-      resCompOptTriples.addAll(Arrays.asList(compName,
-          ResourceKeys.COMPONENT_PLACEMENT_POLICY,
-          component.getPlacementPolicy().getLabel()));
-    }
-
-    return resCompOptTriples;
-  }
-
-  private static UserGroupInformation getSliderUser() {
-    if (SLIDER_USER != null) {
-      return SLIDER_USER;
-    }
-    UserGroupInformation sliderUser = null;
-    UserGroupInformation.setConfiguration(SLIDER_CONFIG);
-    String loggedInUser = getUserToRunAs();
-    try {
-      sliderUser = UserGroupInformation.getBestUGI(null, loggedInUser);
-      // TODO: Once plugged into RM process we should remove the previous call
-      // and replace it with getCurrentUser as commented below.
-      // sliderUser = UserGroupInformation.getCurrentUser();
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to create UGI (slider user)", e);
-    }
-    return sliderUser;
-  }
-
-  private <T> T invokeSliderClientRunnable(
-      final SliderClientContextRunnable<T> runnable)
-      throws IOException, InterruptedException, YarnException {
-    try {
-      T value = SLIDER_USER.doAs(new PrivilegedExceptionAction<T>() {
-        @Override
-        public T run() throws Exception {
-          return runnable.run(SLIDER_CLIENT);
-        }
-      });
-      return value;
-    } catch (UndeclaredThrowableException e) {
-      Throwable cause = e.getCause();
-      if (cause instanceof YarnException) {
-        YarnException ye = (YarnException) cause;
-        throw ye;
-      }
-      throw e;
-    }
-  }
-
   protected static SliderClient createSliderClient() {
     if (SLIDER_CLIENT != null) {
       return SLIDER_CLIENT;
     }
-    org.apache.hadoop.conf.Configuration sliderClientConfiguration = SLIDER_CONFIG;
+    org.apache.hadoop.conf.Configuration sliderClientConfiguration =
+        SLIDER_CONFIG;
     SliderClient client = new SliderClient() {
-      @Override
-      public void init(org.apache.hadoop.conf.Configuration conf) {
+      @Override public void init(org.apache.hadoop.conf.Configuration conf) {
         super.init(conf);
         try {
           initHadoopBinding();
-        } catch (SliderException e) {
-          throw new RuntimeException(
-              "Unable to automatically init Hadoop binding", e);
-        } catch (IOException e) {
+        } catch (SliderException | IOException e) {
           throw new RuntimeException(
               "Unable to automatically init Hadoop binding", e);
         }
@@ -730,8 +144,7 @@ public class ApplicationApiService implements ApplicationApi {
     try {
       logger
           .debug("Slider Client configuration: {}", sliderClientConfiguration);
-      sliderClientConfiguration = client.bindArgs(sliderClientConfiguration,
-          new String[] { "help" });
+      sliderClientConfiguration = client.bindArgs(sliderClientConfiguration, new String[] { "help" });
       client.init(sliderClientConfiguration);
       client.start();
     } catch (Exception e) {
@@ -741,608 +154,116 @@ public class ApplicationApiService implements ApplicationApi {
     return client;
   }
 
-  private static String getUserToRunAs() {
-    String user = System.getenv(PROPERTY_APP_RUNAS_USER);
-    if (StringUtils.isEmpty(user)) {
-      user = "root";
-    }
-    return user;
-  }
-
-  private static org.apache.hadoop.conf.Configuration getSliderClientConfiguration() {
-    if (SLIDER_CONFIG != null) {
-      return SLIDER_CONFIG;
-    }
-    YarnConfiguration yarnConfig = new YarnConfiguration();
-    logger.info("prop yarn.resourcemanager.address = {}",
-        yarnConfig.get("yarn.resourcemanager.address"));
-
-    return yarnConfig;
-  }
-
-  private interface SliderClientContextRunnable<T> {
-    T run(SliderClient sliderClient)
-        throws YarnException, IOException, InterruptedException;
-  }
-
-  @GET
-  @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
-  public Response getApplications(@QueryParam("state") String state) {
-    logger.info("GET: getApplications with param state = {}", state);
-
-    // Get all applications in a specific state - lighter projection. For full
-    // detail, call getApplication on a specific app.
-    Set<ApplicationReport> applications;
-    try {
-      if (StringUtils.isNotEmpty(state)) {
-        ApplicationStatus appStatus = new ApplicationStatus();
-        try {
-          ApplicationState.valueOf(state);
-        } catch (IllegalArgumentException e) {
-          appStatus.setDiagnostics("Invalid value for param state - " + state);
-          return Response.status(Status.BAD_REQUEST).entity(appStatus).build();
-        }
-        applications = getSliderApplications(state);
-      } else {
-        applications = getSliderApplications(true);
-      }
-    } catch (Exception e) {
-      logger.error("Get applications failed", e);
-      return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-    }
-
-    Set<Application> apps = new HashSet<Application>();
-    if (applications.size() > 0) {
-      try {
-        for (ApplicationReport app : applications) {
-          Application application = new Application();
-          application.setLifetime(app.getApplicationTimeouts().get(
-              ApplicationTimeoutType.LIFETIME).getRemainingTime());
-          application.setLaunchTime(new Date(app.getStartTime()));
-          application.setName(app.getName());
-          // Containers not required, setting to null to avoid empty list
-          application.setContainers(null);
-          apps.add(application);
-        }
-      } catch (Exception e) {
-        logger.error("Get applications failed", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
-    }
+  // The information this REST endpoint currently returned can be retrieved from
+  // RM web services
+  // Probably the data from AM is more important. Do that later.
+//  @GET @Consumes({ MediaType.APPLICATION_JSON })
+//  @Produces({ MediaType.APPLICATION_JSON })
+//  public Response getApplications(@QueryParam("state") String state) {
+//    logger.info("GET: getApplications with param state = {}", state);
+//    return null;
+//  }
 
-    return Response.ok().entity(apps).build();
-  }
-
-  @GET
-  @Path("/{app_name}")
+  @GET @Path("/{app_name}")
   @Consumes({ MediaType.APPLICATION_JSON })
   @Produces({ MediaType.APPLICATION_JSON })
   public Response getApplication(@PathParam("app_name") String appName) {
     logger.info("GET: getApplication for appName = {}", appName);
+    ApplicationStatus applicationStatus = new ApplicationStatus();
 
     // app name validation
     if (!SliderUtils.isClusternameValid(appName)) {
-      ApplicationStatus applicationStatus = new ApplicationStatus();
-      applicationStatus.setDiagnostics("Invalid application name");
+      applicationStatus.setDiagnostics("Invalid application name: " + appName);
       applicationStatus.setCode(ERROR_CODE_APP_NAME_INVALID);
       return Response.status(Status.NOT_FOUND).entity(applicationStatus)
           .build();
     }
 
-    // Check if app exists
     try {
-      int livenessCheck = getSliderList(appName);
-      if (livenessCheck < 0) {
-        logger.info("Application not running");
-        ApplicationStatus applicationStatus = new ApplicationStatus();
-        applicationStatus.setDiagnostics(ERROR_APPLICATION_NOT_RUNNING);
-        applicationStatus.setCode(ERROR_CODE_APP_IS_NOT_RUNNING);
+      Application app = SLIDER_CLIENT.actionStatus(appName);
+      ApplicationReport report = SLIDER_CLIENT.findInstance(appName);
+      if (app != null && report != null) {
+        app.setLifetime(
+            report.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME)
+                .getRemainingTime());
+        logger.info("Application = {}", app);
+        return Response.ok(app).build();
+      } else {
+        String message = "Application " + appName + " does not exist.";
+        logger.info(message);
+        applicationStatus.setCode(ERROR_CODE_APP_DOES_NOT_EXIST);
+        applicationStatus.setDiagnostics(message);
         return Response.status(Status.NOT_FOUND).entity(applicationStatus)
             .build();
       }
-    } catch (UnknownApplicationInstanceException e) {
-      logger.error("Get application failed, application not found", e);
-      ApplicationStatus applicationStatus = new ApplicationStatus();
-      applicationStatus.setDiagnostics(ERROR_APPLICATION_DOES_NOT_EXIST);
-      applicationStatus.setCode(ERROR_CODE_APP_DOES_NOT_EXIST);
-      return Response.status(Status.NOT_FOUND).entity(applicationStatus)
-          .build();
-    } catch (Exception e) {
-      logger.error("Get application failed, application not running", e);
-      ApplicationStatus applicationStatus = new ApplicationStatus();
-      applicationStatus.setDiagnostics(ERROR_APPLICATION_NOT_RUNNING);
-      applicationStatus.setCode(ERROR_CODE_APP_IS_NOT_RUNNING);
-      return Response.status(Status.NOT_FOUND).entity(applicationStatus)
-          .build();
-    }
-
-    Application app = new Application();
-    app.setName(appName);
-    app.setUri(CONTEXT_ROOT + APPLICATIONS_API_RESOURCE_PATH + "/"
-        + appName);
-    // TODO: add status
-    app.setState(ApplicationState.ACCEPTED);
-    JsonObject appStatus = null;
-    JsonObject appRegistryQuicklinks = null;
-    try {
-      appStatus = getSliderApplicationStatus(appName);
-      appRegistryQuicklinks = getSliderApplicationRegistry(appName,
-          "quicklinks");
-      return populateAppData(app, appStatus, appRegistryQuicklinks);
-    } catch (BadClusterStateException | NotFoundException e) {
-      logger.error(
-          "Get application failed, application not in running state yet", e);
-      ApplicationStatus applicationStatus = new ApplicationStatus();
-      applicationStatus.setDiagnostics("Application not running yet");
-      applicationStatus.setCode(ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET);
-      return Response.status(Status.NOT_FOUND).entity(applicationStatus)
-          .build();
     } catch (Exception e) {
       logger.error("Get application failed", e);
-      ApplicationStatus applicationStatus = new ApplicationStatus();
-      applicationStatus.setDiagnostics("Failed to retrieve application: "
-          + e.getMessage());
+      applicationStatus
+          .setDiagnostics("Failed to retrieve application: " + e.getMessage());
       return Response.status(Status.INTERNAL_SERVER_ERROR)
           .entity(applicationStatus).build();
     }
   }
 
-  private Response populateAppData(Application app, JsonObject appStatus,
-      JsonObject appRegistryQuicklinks) {
-    String appName = jsonGetAsString(appStatus, "name");
-    Long totalNumberOfRunningContainers = 0L;
-    Long totalExpectedNumberOfRunningContainers = 0L;
-    Long totalNumberOfIpAssignedContainers = 0L;
-
-    // info
-    JsonObject applicationInfo = jsonGetAsObject(appStatus, "info");
-    if (applicationInfo != null) {
-      String applicationId = jsonGetAsString(applicationInfo, "info.am.app.id");
-      if (applicationId != null) {
-        app.setId(applicationId);
-      }
-    }
-
-    // state
-    String appState = jsonGetAsString(appStatus, "state");
-    if (appState == null) {
-      // consider that app is still in ACCEPTED state
-      appState = String.valueOf(StateValues.STATE_INCOMPLETE);
-    }
-    switch (Integer.parseInt(appState)) {
-      case StateValues.STATE_LIVE:
-        app.setState(ApplicationState.STARTED);
-        break;
-      case StateValues.STATE_CREATED:
-      case StateValues.STATE_INCOMPLETE:
-      case StateValues.STATE_SUBMITTED:
-        app.setState(ApplicationState.ACCEPTED);
-        return Response.ok(app).build();
-      case StateValues.STATE_DESTROYED:
-      case StateValues.STATE_STOPPED:
-        app.setState(ApplicationState.STOPPED);
-        return Response.ok(app).build();
-      default:
-        break;
-    }
-
-    // start time
-    app.setLaunchTime(appStatus.get("createTime") == null ? null
-        : new Date(appStatus.get("createTime").getAsLong()));
-
-    app.setLifetime(queryLifetime(appName));
-
-    // Quicklinks
-    Map<String, String> appQuicklinks = new HashMap<>();
-    for (Map.Entry<String, JsonElement> quicklink : appRegistryQuicklinks
-        .entrySet()) {
-      appQuicklinks.put(quicklink.getKey(), quicklink.getValue() == null ? null
-          : quicklink.getValue().getAsString());
-    }
-    if (!appQuicklinks.isEmpty()) {
-      app.setQuicklinks(appQuicklinks);
-    }
-
-    ArrayList<String> componentNames = new ArrayList<>();
-
-    // status.live
-    JsonObject applicationStatus = jsonGetAsObject(appStatus, "status");
-    // roles
-    JsonObject applicationRoles = jsonGetAsObject(appStatus, "roles");
-    // statistics
-    JsonObject applicationStatistics = jsonGetAsObject(appStatus, "statistics");
-    if (applicationRoles == null) {
-      // initialize to empty object to avoid too many null checks
-      applicationRoles = EMPTY_JSON_OBJECT;
-    }
-    if (applicationStatus != null) {
-      JsonObject applicationLive = jsonGetAsObject(applicationStatus, "live");
-      if (applicationLive != null) {
-        for (Entry<String, JsonElement> entry : applicationLive.entrySet()) {
-          if (entry.getKey().equals(SLIDER_APPMASTER_COMPONENT_NAME)) {
-            continue;
-          }
-          componentNames.add(entry.getKey());
-          JsonObject componentRole = applicationRoles
-              .get(entry.getKey()) == null ? EMPTY_JSON_OBJECT
-                  : applicationRoles.get(entry.getKey()).getAsJsonObject();
-          JsonObject liveContainers = entry.getValue().getAsJsonObject();
-          if (liveContainers != null) {
-            for (Map.Entry<String, JsonElement> liveContainerEntry : liveContainers
-                .entrySet()) {
-              String containerId = liveContainerEntry.getKey();
-              Container container = new Container();
-              container.setId(containerId);
-              JsonObject liveContainer = (JsonObject) liveContainerEntry
-                  .getValue();
-              container
-                  .setLaunchTime(liveContainer.get("startTime") == null ? null
-                      : new Date(liveContainer.get("startTime").getAsLong()));
-              container
-                  .setComponentName(jsonGetAsString(liveContainer, "role"));
-              container.setIp(jsonGetAsString(liveContainer, "ip"));
-              // If ip is non-null increment count
-              if (container.getIp() != null) {
-                totalNumberOfIpAssignedContainers++;
-              }
-              container.setHostname(jsonGetAsString(liveContainer, "hostname"));
-              container.setState(ContainerState.INIT);
-              if (StringUtils.isNotEmpty(container.getIp())
-                  && StringUtils.isNotEmpty(container.getHostname())) {
-                container.setState(ContainerState.READY);
-              }
-              container.setBareHost(jsonGetAsString(liveContainer, "host"));
-              container.setUri(CONTEXT_ROOT + APPLICATIONS_API_RESOURCE_PATH
-                  + "/" + appName + CONTAINERS_API_RESOURCE_PATH + "/"
-                  + containerId);
-              Resource resource = new Resource();
-              resource.setCpus(jsonGetAsInt(componentRole, "yarn.vcores"));
-              resource.setMemory(jsonGetAsString(componentRole, "yarn.memory"));
-              container.setResource(resource);
-              Artifact artifact = new Artifact();
-              String dockerImageName = jsonGetAsString(componentRole,
-                  "docker.image");
-              if (StringUtils.isNotEmpty(dockerImageName)) {
-                artifact.setId(dockerImageName);
-                artifact.setType(Artifact.TypeEnum.DOCKER);
-              } else {
-                // Might have to handle tarballs here
-                artifact.setType(null);
-              }
-              container.setArtifact(artifact);
-              container.setPrivilegedContainer(
-                  jsonGetAsBoolean(componentRole, "docker.usePrivileged"));
-              // TODO: add container property - for response only?
-              app.addContainer(container);
-            }
-          }
-        }
-      }
-    }
-
-    // application info
-    if (applicationRoles != null && !componentNames.isEmpty()) {
-      JsonObject applicationRole = jsonGetAsObject(applicationRoles,
-          componentNames.get(0));
-      if (applicationRole != null) {
-        Artifact artifact = new Artifact();
-        // how to get artifact id - docker image name??
-        artifact.setId(null);
-      }
-    }
-
-    // actual and expected number of containers
-    if (applicationStatistics != null) {
-      for (Entry<String, JsonElement> entry : applicationStatistics.entrySet()) {
-        if (entry.getKey().equals(SLIDER_APPMASTER_COMPONENT_NAME)) {
-          continue;
-        }
-        JsonObject containerStats = (JsonObject) entry.getValue();
-        totalNumberOfRunningContainers += jsonGetAsInt(containerStats,
-            "containers.live");
-        totalExpectedNumberOfRunningContainers += jsonGetAsInt(containerStats,
-            "containers.desired");
-      }
-      app.setNumberOfContainers(totalExpectedNumberOfRunningContainers);
-      app.setNumberOfRunningContainers(totalNumberOfRunningContainers);
-    }
-
-    // If all containers of the app has IP assigned, then according to the REST
-    // API it is considered to be READY. Note, application readiness from
-    // end-users point of view, is out of scope of the REST API. Also, this
-    // readiness has nothing to do with readiness-check defined at the component
-    // level (which is used for dependency resolution of component DAG).
-    if (totalNumberOfIpAssignedContainers
-        .longValue() == totalExpectedNumberOfRunningContainers.longValue()) {
-      app.setState(ApplicationState.READY);
-    }
-    logger.info("Application = {}", app);
-    return Response.ok(app).build();
-  }
-
-  private String jsonGetAsString(JsonObject object, String key) {
-    return object.get(key) == null ? null : object.get(key).getAsString();
-  }
-
-  private Integer jsonGetAsInt(JsonObject object, String key) {
-    return object.get(key) == null ? null
-        : object.get(key).isJsonNull() ? null : object.get(key).getAsInt();
-  }
-
-  private Boolean jsonGetAsBoolean(JsonObject object, String key) {
-    return object.get(key) == null ? null
-        : object.get(key).isJsonNull() ? null : object.get(key).getAsBoolean();
-  }
-
-  private JsonObject jsonGetAsObject(JsonObject object, String key) {
-    return object.get(key) == null ? null : object.get(key).getAsJsonObject();
-  }
-
-  private long queryLifetime(String appName) {
-    try {
-      return invokeSliderClientRunnable(
-          new SliderClientContextRunnable<Long>() {
-            @Override
-            public Long run(SliderClient sliderClient)
-                throws YarnException, IOException, InterruptedException {
-              ApplicationReport report = sliderClient.findInstance(appName);
-              return report.getApplicationTimeouts()
-                  .get(ApplicationTimeoutType.LIFETIME).getRemainingTime();
-            }
-          });
-    } catch (Exception e) {
-      logger.error("Error when querying lifetime for " + appName, e);
-      return DEFAULT_UNLIMITED_LIFETIME;
-    }
-  }
-
-  private JsonObject getSliderApplicationStatus(final String appName)
-      throws IOException, YarnException, InterruptedException {
-
-    return invokeSliderClientRunnable(
-        new SliderClientContextRunnable<JsonObject>() {
-          @Override
-          public JsonObject run(SliderClient sliderClient)
-              throws YarnException, IOException, InterruptedException {
-            String status = null;
-            try {
-              status = sliderClient.actionStatus(appName);
-            } catch (BadClusterStateException e) {
-              logger.warn("Application not running yet", e);
-              return EMPTY_JSON_OBJECT;
-            } catch (Exception e) {
-              logger.error("Exception calling slider.actionStatus", e);
-              return EMPTY_JSON_OBJECT;
-            }
-            JsonElement statusElement = JSON_PARSER.parse(status);
-            return (statusElement == null || statusElement instanceof JsonNull)
-                ? EMPTY_JSON_OBJECT : (JsonObject) statusElement;
-          }
-        });
-  }
-
-  private JsonObject getSliderApplicationRegistry(final String appName,
-      final String registryName)
-      throws IOException, YarnException, InterruptedException {
-    final ActionRegistryArgs registryArgs = new ActionRegistryArgs();
-    registryArgs.name = appName;
-    registryArgs.getConf = registryName;
-    registryArgs.format = ConfigFormat.JSON.toString();
-
-    return invokeSliderClientRunnable(
-        new SliderClientContextRunnable<JsonObject>() {
-          @Override
-          public JsonObject run(SliderClient sliderClient)
-              throws YarnException, IOException, InterruptedException {
-            String registry = null;
-            try {
-              registry = sliderClient.actionRegistryGetConfig(registryArgs)
-                .asJson();
-            } catch (FileNotFoundException | NotFoundException e) {
-              // ignore and return empty object
-              return EMPTY_JSON_OBJECT;
-            } catch (Exception e) {
-              logger.error("Exception calling slider.actionRegistryGetConfig",
-                  e);
-              return EMPTY_JSON_OBJECT;
-            }
-            JsonElement registryElement = JSON_PARSER.parse(registry);
-            return (registryElement == null
-                || registryElement instanceof JsonNull) ? EMPTY_JSON_OBJECT
-                    : (JsonObject) registryElement;
-          }
-        });
-  }
-
-  private Integer getSliderList(final String appName)
-      throws IOException, YarnException, InterruptedException {
-    return getSliderList(appName, true);
-  }
-
-  private Integer getSliderList(final String appName, final boolean liveOnly)
-      throws IOException, YarnException, InterruptedException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Integer>() {
-      @Override
-      public Integer run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        int status = 0;
-        if (liveOnly) {
-          status = sliderClient.actionList(appName);
-        } else {
-          status = sliderClient.actionList(appName, ACTION_LIST_ARGS);
-        }
-        return status;
-      }
-    });
-  }
-
-  private Set<ApplicationReport> getSliderApplications(final String state)
-      throws IOException, YarnException, InterruptedException {
-    return getSliderApplications(false, state);
-  }
-
-  private Set<ApplicationReport> getSliderApplications(final boolean liveOnly)
-      throws IOException, YarnException, InterruptedException {
-    return getSliderApplications(liveOnly, null);
-  }
-
-  private Set<ApplicationReport> getSliderApplications(final boolean liveOnly,
-      final String state)
-      throws IOException, YarnException, InterruptedException {
-    return invokeSliderClientRunnable(
-        new SliderClientContextRunnable<Set<ApplicationReport>>() {
-          @Override
-          public Set<ApplicationReport> run(SliderClient sliderClient)
-              throws YarnException, IOException, InterruptedException {
-            Set<ApplicationReport> apps;
-            ActionListArgs listArgs = new ActionListArgs();
-            if (liveOnly) {
-              apps = sliderClient.getApplicationList(null);
-            } else if (StringUtils.isNotEmpty(state)) {
-              listArgs.state = state;
-              apps = sliderClient.getApplicationList(null, listArgs);
-            } else {
-              apps = sliderClient.getApplicationList(null, listArgs);
-            }
-            return apps;
-          }
-        });
-  }
-
   @DELETE
   @Path("/{app_name}")
   @Consumes({ MediaType.APPLICATION_JSON })
   @Produces({ MediaType.APPLICATION_JSON })
   public Response deleteApplication(@PathParam("app_name") String appName) {
     logger.info("DELETE: deleteApplication for appName = {}", appName);
+    return stopApplication(appName, true);
+  }
 
+  private Response stopApplication(String appName, boolean destroy) {
     try {
-      Response stopResponse = stopSliderApplication(appName);
-      if (stopResponse.getStatus() == Status.INTERNAL_SERVER_ERROR
-          .getStatusCode()) {
-        return Response.status(Status.NOT_FOUND).build();
+      SLIDER_CLIENT.actionStop(appName, ACTION_FREEZE_ARGS);
+      if (destroy) {
+        SLIDER_CLIENT.actionDestroy(appName);
+        logger.info("Successfully deleted application {}", appName);
+      } else {
+        logger.info("Successfully stopped application {}", appName);
       }
-    } catch (UnknownApplicationInstanceException e) {
-      logger.error("Application does not exist", e);
-      return Response.status(Status.NOT_FOUND).build();
+      return Response.status(Status.NO_CONTENT).build();
+    } catch (ApplicationNotFoundException e) {
+      ApplicationStatus applicationStatus = new ApplicationStatus();
+      applicationStatus.setDiagnostics(
+          "Application " + appName + " not found " + e.getMessage());
+      return Response.status(Status.NOT_FOUND).entity(applicationStatus)
+          .build();
     } catch (Exception e) {
-      logger.error("Delete application failed", e);
-      return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-    }
-
-    // Although slider client stop returns immediately, it usually takes a
-    // little longer for it to stop from YARN point of view. Slider destroy
-    // fails if the application is not completely stopped. Hence the need to
-    // call destroy in a controlled loop few times (only if exit code is
-    // EXIT_APPLICATION_IN_USE or EXIT_INSTANCE_EXISTS), before giving up.
-    boolean keepTrying = true;
-    int maxDeleteAttempts = 5;
-    int deleteAttempts = 0;
-    int sleepIntervalInMillis = 500;
-    while (keepTrying && deleteAttempts < maxDeleteAttempts) {
-      try {
-        destroySliderApplication(appName);
-        keepTrying = false;
-      } catch (SliderException e) {
-        if (e.getExitCode() == SliderExitCodes.EXIT_APPLICATION_IN_USE
-            || e.getExitCode() == SliderExitCodes.EXIT_INSTANCE_EXISTS) {
-          deleteAttempts++;
-          // If we used up all the allowed delete attempts, let's log it as
-          // error before giving up. Otherwise log as warn.
-          if (deleteAttempts < maxDeleteAttempts) {
-            logger.warn("Application not in stopped state, waiting for {}ms"
-                + " before trying delete again", sleepIntervalInMillis);
-          } else {
-            logger.error("Delete application failed", e);
-          }
-          try {
-            Thread.sleep(sleepIntervalInMillis);
-          } catch (InterruptedException e1) {
-          }
-        } else {
-          logger.error("Delete application threw exception", e);
-          return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-        }
-      } catch (Exception e) {
-        logger.error("Delete application failed", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
+      ApplicationStatus applicationStatus = new ApplicationStatus();
+      applicationStatus.setDiagnostics(e.getMessage());
+      return Response.status(Status.INTERNAL_SERVER_ERROR)
+          .entity(applicationStatus).build();
     }
-    return Response.status(Status.NO_CONTENT).build();
   }
 
-  private Response stopSliderApplication(final String appName)
-      throws IOException, YarnException, InterruptedException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Response>() {
-      @Override
-      public Response run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        int returnCode = sliderClient.actionFreeze(appName, ACTION_FREEZE_ARGS);
-        if (returnCode == 0) {
-          logger.info("Successfully stopped application {}", appName);
-          return Response.status(Status.NO_CONTENT).build();
-        } else {
-          logger.error("Stop of application {} failed with return code ",
-              appName, returnCode);
-          ApplicationStatus applicationStatus = new ApplicationStatus();
-          applicationStatus.setDiagnostics("Stop of application " + appName
-              + " failed");
-          return Response.status(Status.INTERNAL_SERVER_ERROR)
-              .entity(applicationStatus).build();
-        }
-      }
-    });
-  }
-
-  private Response startSliderApplication(final String appName, Application app)
-      throws IOException, YarnException, InterruptedException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Response>() {
-      @Override
-      public Response run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        ActionThawArgs thawArgs = new ActionThawArgs();
-        if (app.getLifetime() == null) {
-          app.setLifetime(DEFAULT_UNLIMITED_LIFETIME);
-        }
-        thawArgs.lifetime = app.getLifetime();
-        int returnCode = sliderClient.actionThaw(appName, thawArgs);
-        if (returnCode == 0) {
-          logger.info("Successfully started application {}", appName);
-          ApplicationStatus applicationStatus = new ApplicationStatus();
-          applicationStatus.setState(ApplicationState.ACCEPTED);
-          applicationStatus.setUri(CONTEXT_ROOT
-              + APPLICATIONS_API_RESOURCE_PATH + "/" + appName);
-          // 202 = ACCEPTED
-          return Response.status(HTTP_STATUS_CODE_ACCEPTED)
-              .entity(applicationStatus).build();
-        } else {
-          logger.error("Start of application {} failed with returnCode ",
-              appName, returnCode);
-          ApplicationStatus applicationStatus = new ApplicationStatus();
-          applicationStatus.setDiagnostics("Start of application " + appName
-              + " failed");
-          return Response.status(Status.INTERNAL_SERVER_ERROR)
-              .entity(applicationStatus).build();
-        }
-      }
-    });
-  }
+  @PUT @Path("/{app_name}/components/{component_name}")
+  @Consumes({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON })
+  public Response updateComponent(@PathParam("app_name") String appName,
+      @PathParam("component_name") String componentName, Component component) {
 
-  private Void destroySliderApplication(final String appName)
-      throws IOException, YarnException, InterruptedException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Void>() {
-      @Override
-      public Void run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        sliderClient.actionDestroy(appName);
-        return null;
-      }
-    });
+    if (component.getNumberOfContainers() < 0) {
+      return Response.status(Status.BAD_REQUEST).entity(
+          "Application = " + appName + ", Component = " + component.getName()
+              + ": Invalid number of containers specified " + component
+              .getNumberOfContainers()).build();
+    }
+    try {
+      long original = SLIDER_CLIENT.flex(appName, component);
+      return Response.ok().entity(
+          "Updating " + componentName + " size from " + original + " to "
+              + component.getNumberOfContainers()).build();
+    } catch (YarnException | IOException e) {
+      ApplicationStatus status = new ApplicationStatus();
+      status.setDiagnostics(e.getMessage());
+      return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status)
+          .build();
+    }
   }
 
-  @PUT
-  @Path("/{app_name}")
+  @PUT @Path("/{app_name}")
   @Consumes({ MediaType.APPLICATION_JSON })
   @Produces({ MediaType.APPLICATION_JSON })
   public Response updateApplication(@PathParam("app_name") String appName,
@@ -1354,158 +275,70 @@ public class ApplicationApiService implements ApplicationApi {
     // path param
     updateAppData.setName(appName);
 
-    // Adding support for stop and start
     // For STOP the app should be running. If already stopped then this
     // operation will be a no-op. For START it should be in stopped state.
     // If already running then this operation will be a no-op.
-
-    // Check if app exists in any state
-    try {
-      int appsFound = getSliderList(appName, false);
-      if (appsFound < 0) {
-        return Response.status(Status.NOT_FOUND).build();
-      }
-    } catch (Exception e) {
-      logger.error("Update application failed", e);
-      return Response.status(Status.NOT_FOUND).build();
-    }
-
-    // If a STOP is requested
     if (updateAppData.getState() != null
         && updateAppData.getState() == ApplicationState.STOPPED) {
-      try {
-        int livenessCheck = getSliderList(appName);
-        if (livenessCheck == 0) {
-          return stopSliderApplication(appName);
-        } else {
-          logger.info("Application {} is already stopped", appName);
-          ApplicationStatus applicationStatus = new ApplicationStatus();
-          applicationStatus.setDiagnostics("Application " + appName
-              + " is already stopped");
-          return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
-              .build();
-        }
-      } catch (Exception e) {
-        logger.error("Stop application failed", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
+      return stopApplication(appName, false);
     }
 
     // If a START is requested
     if (updateAppData.getState() != null
         && updateAppData.getState() == ApplicationState.STARTED) {
-      try {
-        int livenessCheck = getSliderList(appName);
-        if (livenessCheck != 0) {
-          return startSliderApplication(appName, updateAppData);
-        } else {
-          logger.info("Application {} is already running", appName);
-          ApplicationStatus applicationStatus = new ApplicationStatus();
-          applicationStatus.setDiagnostics("Application " + appName
-              + " is already running");
-          applicationStatus.setUri(CONTEXT_ROOT
-              + APPLICATIONS_API_RESOURCE_PATH + "/" + appName);
-          return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
-              .build();
-        }
-      } catch (Exception e) {
-        logger.error("Start application failed", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
-    }
-
-    // If no of instances specified then treat it as a flex
-    if (updateAppData.getNumberOfContainers() != null
-        && updateAppData.getComponents() == null) {
-      updateAppData.setComponents(getDefaultComponentAsList());
-    }
-
-    // At this point if there are components then it is a flex
-    if (updateAppData.getComponents() != null) {
-      try {
-        int livenessCheck = getSliderList(appName);
-        if (livenessCheck == 0) {
-          flexSliderApplication(appName, updateAppData);
-        }
-        return Response.status(Status.NO_CONTENT).build();
-      } catch (Exception e) {
-        logger.error("Update application failed", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
+      return startApplication(appName);
     }
 
     // If new lifetime value specified then update it
     if (updateAppData.getLifetime() != null
         && updateAppData.getLifetime() > 0) {
-      try {
-        updateAppLifetime(appName, updateAppData.getLifetime());
-      } catch (Exception e) {
-        logger.error("Failed to update application (" + appName + ") lifetime ("
-            + updateAppData.getLifetime() + ")", e);
-        return Response.status(Status.INTERNAL_SERVER_ERROR).build();
-      }
+      return updateLifetime(appName, updateAppData);
+    }
+
+    // flex a single component app
+    if (updateAppData.getNumberOfContainers() != null && !ServiceApiUtil
+        .hasComponent(
+        updateAppData)) {
+      Component defaultComp = ServiceApiUtil.createDefaultComponent(updateAppData);
+      return updateComponent(updateAppData.getName(), defaultComp.getName(),
+          defaultComp);
     }
 
     // If nothing happens consider it a no-op
     return Response.status(Status.NO_CONTENT).build();
   }
 
-  private Void updateAppLifetime(String appName, long lifetime)
-      throws InterruptedException, YarnException, IOException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Void>() {
-      @Override public Void run(SliderClient sliderClient)
-          throws YarnException, IOException, InterruptedException {
-        ActionUpdateArgs args = new ActionUpdateArgs();
-        args.lifetime = lifetime;
-        sliderClient.actionUpdate(appName, args);
-        return null;
-      }
-    });
-  }
-
-  // create default component and initialize with app level global values
-  private List<Component> getDefaultComponentAsList(Application app) {
-    List<Component> comps = getDefaultComponentAsList();
-    Component comp = comps.get(0);
-    comp.setArtifact(app.getArtifact());
-    comp.setResource(app.getResource());
-    comp.setNumberOfContainers(app.getNumberOfContainers());
-    comp.setLaunchCommand(app.getLaunchCommand());
-    return comps;
-  }
-
-  private List<Component> getDefaultComponentAsList() {
-    Component comp = new Component();
-    comp.setName(DEFAULT_COMPONENT_NAME);
-    List<Component> comps = new ArrayList<>();
-    comps.add(comp);
-    return comps;
+  private Response updateLifetime(String appName, Application updateAppData) {
+    try {
+      String newLifeTime =
+          SLIDER_CLIENT.updateLifetime(appName, updateAppData.getLifetime());
+      return Response.ok("Application " + appName + " lifeTime is successfully updated to "
+          + updateAppData.getLifetime() + " seconds from now: " + newLifeTime).build();
+    } catch (Exception e) {
+      String message =
+          "Failed to update application (" + appName + ") lifetime ("
+              + updateAppData.getLifetime() + ")";
+      logger.error(message, e);
+      return Response.status(Status.INTERNAL_SERVER_ERROR)
+          .entity(message + " : " + e.getMessage()).build();
+    }
   }
 
-  private Void flexSliderApplication(final String appName,
-      final Application updateAppData) throws IOException, YarnException,
-      InterruptedException {
-    return invokeSliderClientRunnable(new SliderClientContextRunnable<Void>() {
-      @Override
-      public Void run(SliderClient sliderClient) throws YarnException,
-          IOException, InterruptedException {
-        ActionFlexArgs flexArgs = new ActionFlexArgs();
-        ComponentArgsDelegate compDelegate = new ComponentArgsDelegate();
-        Long globalNumberOfContainers = updateAppData.getNumberOfContainers();
-        for (Component comp : updateAppData.getComponents()) {
-          Long noOfContainers = comp.getNumberOfContainers() == null
-              ? globalNumberOfContainers : comp.getNumberOfContainers();
-          if (noOfContainers != null) {
-            compDelegate.componentTuples.addAll(
-                Arrays.asList(comp.getName(), String.valueOf(noOfContainers)));
-          }
-        }
-        if (!compDelegate.componentTuples.isEmpty()) {
-          flexArgs.componentDelegate = compDelegate;
-          sliderClient.actionFlex(appName, flexArgs);
-        }
-        return null;
-      }
-    });
+  private Response startApplication(String appName) {
+    try {
+      int ret = SLIDER_CLIENT.actionList(appName);
+      if (ret == 0) {
+        return Response.ok()
+            .entity("Application " + appName + " is already alive.").build();
+      }
+      SLIDER_CLIENT.actionStart(appName, null);
+      logger.info("Successfully started application " + appName);
+      return Response.ok("Application " + appName + " is successfully started").build();
+    } catch (Exception e) {
+      String message = "Failed to start application " + appName;
+      logger.info(message, e);
+      return Response.status(Status.INTERNAL_SERVER_ERROR)
+          .entity(message + ": " + e.getMessage()).build();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiConstants.java
deleted file mode 100644
index 23b7ad4..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiConstants.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.utils;
-
-public interface RestApiConstants {
-  String CONTEXT_ROOT = "/services/v1";
-  String APPLICATIONS_API_RESOURCE_PATH = "/applications";
-  String CONTAINERS_API_RESOURCE_PATH = "/containers";
-  String SLIDER_APPMASTER_COMPONENT_NAME = "slider-appmaster";
-  String SLIDER_CONFIG_SCHEMA = "http://example.org/specification/v2.0.0";
-  String METAINFO_SCHEMA_VERSION = "2.1";
-  String COMPONENT_TYPE_YARN_DOCKER = "yarn_docker";
-
-  String DEFAULT_START_CMD = "/bootstrap/privileged-centos6-sshd";
-  String DEFAULT_COMPONENT_NAME = "DEFAULT";
-  String DEFAULT_IMAGE = "centos:centos6";
-  String DEFAULT_NETWORK = "bridge";
-  String DEFAULT_COMMAND_PATH = "/usr/bin/docker";
-  String DEFAULT_USE_NETWORK_SCRIPT = "yes";
-
-  String PLACEHOLDER_APP_NAME = "${APP_NAME}";
-  String PLACEHOLDER_APP_COMPONENT_NAME = "${APP_COMPONENT_NAME}";
-  String PLACEHOLDER_COMPONENT_ID = "${COMPONENT_ID}";
-
-  String PROPERTY_REST_SERVICE_HOST = "REST_SERVICE_HOST";
-  String PROPERTY_REST_SERVICE_PORT = "REST_SERVICE_PORT";
-  String PROPERTY_APP_LIFETIME = "docker.lifetime";
-  String PROPERTY_APP_RUNAS_USER = "APP_RUNAS_USER";
-  Long DEFAULT_UNLIMITED_LIFETIME = -1l;
-
-  Integer HTTP_STATUS_CODE_ACCEPTED = 202;
-  String ARTIFACT_TYPE_SLIDER_ZIP = "slider-zip";
-
-  Integer GET_APPLICATIONS_THREAD_POOL_SIZE = 200;
-
-  String PROPERTY_PYTHON_PATH = "python.path";
-  String PROPERTY_DNS_DEPENDENCY = "site.global.dns.dependency";
-
-  String COMMAND_ORDER_SUFFIX_START = "-START";
-  String COMMAND_ORDER_SUFFIX_STARTED = "-STARTED";
-  String EXPORT_GROUP_NAME = "QuickLinks";
-
-  Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001;
-  Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002;
-  Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003;
-  Integer ERROR_CODE_APP_NAME_INVALID = 404004;
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiErrorMessages.java
deleted file mode 100644
index 2d739a4..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/utils/RestApiErrorMessages.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.utils;
-
-public interface RestApiErrorMessages {
-  String ERROR_APPLICATION_NAME_INVALID =
-      "Application name is either empty or not provided";
-  String ERROR_APPLICATION_NAME_INVALID_FORMAT =
-      "Application name is not valid - only lower case letters, digits,"
-          + " underscore and hyphen are allowed";
-
-  String ERROR_APPLICATION_NOT_RUNNING = "Application not running";
-  String ERROR_APPLICATION_DOES_NOT_EXIST = "Application not found";
-  String ERROR_APPLICATION_IN_USE = "Application already exists in started"
-      + " state";
-  String ERROR_APPLICATION_INSTANCE_EXISTS = "Application already exists in"
-      + " stopped/failed state (either restart with PUT or destroy with DELETE"
-      + " before creating a new one)";
-
-  String ERROR_SUFFIX_FOR_COMPONENT =
-      " for component %s (nor at the global level)";
-  String ERROR_ARTIFACT_INVALID = "Artifact is not provided";
-  String ERROR_ARTIFACT_FOR_COMP_INVALID =
-      ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-  String ERROR_ARTIFACT_ID_INVALID =
-      "Artifact id (like docker image name) is either empty or not provided";
-  String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
-      ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-
-  String ERROR_RESOURCE_INVALID = "Resource is not provided";
-  String ERROR_RESOURCE_FOR_COMP_INVALID =
-      ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-  String ERROR_RESOURCE_MEMORY_INVALID =
-      "Application resource or memory not provided";
-  String ERROR_RESOURCE_CPUS_INVALID =
-      "Application resource or cpus not provided";
-  String ERROR_RESOURCE_CPUS_INVALID_RANGE =
-      "Unacceptable no of cpus specified, either zero or negative";
-  String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID =
-      ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-  String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID =
-      ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-  String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE =
-      ERROR_RESOURCE_CPUS_INVALID_RANGE
-          + " for component %s (or at the global level)";
-  String ERROR_CONTAINERS_COUNT_INVALID =
-      "Required no of containers not specified";
-  String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
-      ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
-
-  String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED =
-      "Cannot specify" + " cpus/memory along with profile";
-  String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED =
-      ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
-          + " for component %s";
-  String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET =
-      "Resource profile is not " + "supported yet. Please specify cpus/memory.";
-
-  String ERROR_NULL_ARTIFACT_ID =
-      "Artifact Id can not be null if artifact type is none";
-  String ERROR_ABSENT_NUM_OF_INSTANCE =
-      "Num of instances should appear either globally or per component";
-  String ERROR_ABSENT_LAUNCH_COMMAND =
-      "launch command should appear if type is slider-zip or none";
-
-  String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
-      + " component level, needs corresponding values set at application level";
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/webapp/ApplicationApiWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/webapp/ApplicationApiWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/webapp/ApplicationApiWebApp.java
index e1bddb5..7fc01a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/webapp/ApplicationApiWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/webapp/ApplicationApiWebApp.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.yarn.services.webapp;
 
-import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
+import static org.apache.slider.util.RestApiConstants.*;
 
 import java.io.IOException;
 import java.net.InetAddress;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/50] [abbrv] hadoop git commit: YARN-6160. Create an agent-less docker-less provider in the native services framework. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
index 11abdfe..e58d981 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
@@ -18,11 +18,16 @@
 
 package org.apache.slider.providers;
 
-import org.apache.slider.providers.docker.DockerKeys;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Artifact.TypeEnum;
+import org.apache.slider.providers.docker.DockerClientProvider;
 import org.apache.slider.providers.docker.DockerProviderFactory;
+import org.apache.slider.providers.docker.DockerProviderService;
+import org.apache.slider.providers.tarball.TarballClientProvider;
+import org.apache.slider.providers.tarball.TarballProviderFactory;
+import org.apache.slider.providers.tarball.TarballProviderService;
 import org.junit.Test;
 
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -30,25 +35,38 @@ import static org.junit.Assert.assertTrue;
  */
 public class TestProviderFactory {
   @Test
-  public void testLoadAgentProvider() throws Throwable {
+  public void testDockerFactory() throws Throwable {
     SliderProviderFactory factory = SliderProviderFactory
-        .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
+        .createSliderProviderFactory(new Artifact().type(TypeEnum.DOCKER));
     assertTrue(factory instanceof DockerProviderFactory);
+    assertTrue(factory.createClientProvider() instanceof DockerClientProvider);
+    assertTrue(factory.createServerProvider() instanceof DockerProviderService);
+    assertTrue(SliderProviderFactory.getProviderService(new Artifact()
+        .type(TypeEnum.DOCKER)) instanceof DockerProviderService);
   }
 
   @Test
-  public void testCreateClientProvider() throws Throwable {
+  public void testTarballFactory() throws Throwable {
     SliderProviderFactory factory = SliderProviderFactory
-        .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
-    assertNotNull(factory.createClientProvider());
+        .createSliderProviderFactory(new Artifact().type(TypeEnum.TARBALL));
+    assertTrue(factory instanceof TarballProviderFactory);
+    assertTrue(factory.createClientProvider() instanceof TarballClientProvider);
+    assertTrue(factory.createServerProvider() instanceof
+        TarballProviderService);
+    assertTrue(SliderProviderFactory.getProviderService(new Artifact()
+        .type(TypeEnum.TARBALL)) instanceof TarballProviderService);
   }
 
   @Test
-  public void testCreateProviderByClassname() throws Throwable {
+  public void testDefaultFactory() throws Throwable {
     SliderProviderFactory factory = SliderProviderFactory
-        .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
-    assertNotNull(factory.createServerProvider());
-    assertTrue(factory instanceof DockerProviderFactory);
+        .createSliderProviderFactory(null);
+    assertTrue(factory instanceof DefaultProviderFactory);
+    assertTrue(factory.createClientProvider() instanceof DefaultClientProvider);
+    assertTrue(factory.createServerProvider() instanceof
+        DefaultProviderService);
+    assertTrue(SliderProviderFactory.getProviderService(null) instanceof
+        DefaultProviderService);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
index e4e344e..bc6cfd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java
@@ -20,9 +20,7 @@ import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
-import org.apache.slider.server.appmaster.model.mock.MockProviderService;
 import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.apache.slider.server.appmaster.web.WebAppApiImpl;
@@ -45,11 +43,9 @@ public class TestClusterSpecificationBlock extends BaseMockAppStateTest {
     ProviderAppState providerAppState = new ProviderAppState(
         "undefined",
         appState);
-    ProviderService providerService = new MockProviderService();
 
     WebAppApiImpl inst = new WebAppApiImpl(
         providerAppState,
-        providerService,
         null,
         null, null);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
index 92f8559..33385db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java
@@ -27,12 +27,10 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EImp;
 import org.apache.slider.api.ClusterNode;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
 import org.apache.slider.server.appmaster.model.mock.MockContainer;
 import org.apache.slider.server.appmaster.model.mock.MockContainerId;
 import org.apache.slider.server.appmaster.model.mock.MockNodeId;
-import org.apache.slider.server.appmaster.model.mock.MockProviderService;
 import org.apache.slider.server.appmaster.model.mock.MockResource;
 import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.state.RoleInstance;
@@ -65,14 +63,12 @@ public class TestContainerStatsBlock extends BaseMockAppStateTest {
   @Before
   public void setup() throws Exception {
     super.setup();
-    ProviderService providerService = new MockProviderService();
     ProviderAppState providerAppState = new ProviderAppState(
         "undefined",
         appState);
 
     WebAppApiImpl inst = new WebAppApiImpl(
         providerAppState,
-        providerService,
         null,
         METRICS, null);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62ceedf2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
index 96eb3d7..b2d0637 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java
@@ -22,12 +22,10 @@ import com.google.inject.Injector;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.slider.providers.ProviderService;
 import org.apache.slider.server.appmaster.model.appstate.BaseMockAppStateAATest;
 import org.apache.slider.server.appmaster.model.mock.MockContainer;
 import org.apache.slider.server.appmaster.model.mock.MockContainerId;
 import org.apache.slider.server.appmaster.model.mock.MockNodeId;
-import org.apache.slider.server.appmaster.model.mock.MockProviderService;
 import org.apache.slider.server.appmaster.model.mock.MockResource;
 import org.apache.slider.server.appmaster.state.ContainerOutcome;
 import org.apache.slider.server.appmaster.state.OutstandingRequest;
@@ -58,14 +56,12 @@ public class TestIndexBlock extends BaseMockAppStateAATest {
   public void setup() throws Exception {
     super.setup();
     assertNotNull(appState);
-    ProviderService providerService = new MockProviderService();
     ProviderAppState providerAppState = new ProviderAppState(
         "undefined",
         appState);
 
     WebAppApiImpl inst = new WebAppApiImpl(
         providerAppState,
-        providerService,
         null,
         METRICS, null);
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] [abbrv] hadoop git commit: YARN-6655. Fix compilation failure in TestServiceApiUtil due to changes in YARN-6716.

Posted by ji...@apache.org.
YARN-6655. Fix compilation failure in TestServiceApiUtil due to changes in YARN-6716.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36505c89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36505c89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36505c89

Branch: refs/heads/yarn-native-services
Commit: 36505c89ff24397e67b084a2f0ddbd030da641a0
Parents: 4054515
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri Jun 30 09:12:29 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/slider/utils/TestServiceApiUtil.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36505c89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
index 889cc04..28f36de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestServiceApiUtil.java
@@ -463,7 +463,8 @@ public class TestServiceApiUtil {
     Application application = createValidApplication(null);
     application.setComponents(Arrays.asList(c, d, e));
     try {
-      ServiceApiUtil.validateAndResolveApplication(application, sfs);
+      ServiceApiUtil.validateAndResolveApplication(application, sfs,
+          CONF_DEFAULT_DNS);
       Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies");
     } catch (IllegalArgumentException ex) {
       assertEquals(String.format(


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
index 18aa1f5..eb87108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
@@ -26,7 +26,7 @@ import java.util.List;
 
 @Parameters(commandNames = { SliderActions.ACTION_UPGRADE },
             commandDescription = SliderActions.DESCRIBE_ACTION_UPGRADE)
-public class ActionUpgradeArgs extends AbstractActionArgs {
+public class ActionUpgradeArgs extends AbstractClusterBuildingActionArgs {
 
   @Override
   public String getActionName() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
deleted file mode 100644
index f171708..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Delegate for application and resource options
- */
-public class AppAndResouceOptionArgsDelegate extends AbstractArgsDelegate {
-
-
-  /**
-   * Options key value
-   */
-  @Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
-             description = ARG_OPTION + "<name> <value>",
-             splitter = DontSplitArguments.class)
-  public List<String> optionTuples = new ArrayList<>(0);
-
-
-  /**
-   * All the app component option triples
-   */
-  @Parameter(names = {ARG_COMP_OPT,  ARG_COMP_OPT_SHORT,  ARG_ROLEOPT}, arity = 3,
-             description = "Component option " + ARG_COMP_OPT +
-                           " <component> <name> <option>",
-             splitter = DontSplitArguments.class)
-  public List<String> compOptTriples = new ArrayList<>(0);
-
-  /**
-   * Resource Options
-   */
-  @Parameter(names = {ARG_RESOURCE_OPT, ARG_RESOURCE_OPT_SHORT}, arity = 2,
-             description = "Resource option "+ ARG_RESOURCE_OPT + "<name> <value>",
-             splitter = DontSplitArguments.class)
-  public List<String> resOptionTuples = new ArrayList<>(0);
-
-
-  /**
-   * All the resource component option triples
-   */
-  @Parameter(names = {ARG_RES_COMP_OPT, ARG_RES_COMP_OPT_SHORT,}, arity = 3,
-             description = "Component resource option " + ARG_RES_COMP_OPT +
-                           " <component> <name> <option>",
-             splitter = DontSplitArguments.class)
-  public List<String> resCompOptTriples = new ArrayList<>(0);
-
-
-  public Map<String, String> getOptionsMap() throws
-                                             BadCommandArgumentsException {
-    return convertTupleListToMap(ARG_OPTION, optionTuples);
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getCompOptionMap() throws
-                                                             BadCommandArgumentsException {
-    return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
-  }
-
-  public Map<String, String> getResourceOptionsMap() throws
-                                             BadCommandArgumentsException {
-    return convertTupleListToMap(ARG_RESOURCE_OPT, resOptionTuples);
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getResourceCompOptionMap() throws
-                                                             BadCommandArgumentsException {
-    return convertTripleListToMaps(ARG_RES_COMP_OPT, resCompOptTriples);
-  }
-
-  public void setOption(String key, String value) {
-    optionTuples.add(key);
-    optionTuples.add(value);
-  }
-
-  public void setResourceOption(String key, String value) {
-    resOptionTuples.add(key);
-    resOptionTuples.add(value);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
index 45c1fbd..e978957 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
@@ -107,12 +107,7 @@ public interface Arguments {
   String ARG_LIFETIME = "--lifetime";
   String ARG_REPLACE_PKG = "--replacepkg";
   String ARG_RESOURCE = "--resource";
-  String ARG_RESOURCES = "--resources";
-  String ARG_RES_COMP_OPT = "--rescompopt";
-  String ARG_RES_COMP_OPT_SHORT = "--rco";
   String ARG_RESOURCE_MANAGER = "--rm";
-  String ARG_RESOURCE_OPT = "--resopt";
-  String ARG_RESOURCE_OPT_SHORT = "-ro";
   String ARG_SECURE = "--secure";
   String ARG_SERVICETYPE = "--servicetype";
   String ARG_SERVICES = "--services";
@@ -120,7 +115,6 @@ public interface Arguments {
   String ARG_SOURCE = "--source";
   String ARG_STATE = "--state";
   String ARG_SYSPROP = "-S";
-  String ARG_TEMPLATE = "--template";
   String ARG_TRUSTSTORE = "--truststore";
   String ARG_USER = "--user";
   String ARG_UPLOAD = "--upload";
@@ -136,29 +130,9 @@ public interface Arguments {
  RIGHT PLACE IN THE LIST
  */
 
-
-  /**
-   * Deprecated: use ARG_COMPONENT
-   */
-  @Deprecated
-  String ARG_ROLE = "--role";
-
-  /**
-   * Deprecated: use ARG_COMP_OPT
-   */
-  @Deprecated
-  String ARG_ROLEOPT = "--roleopt";
-
   /**
    * server: URI for the cluster
    */
   String ARG_CLUSTER_URI = "-cluster-uri";
 
-
-  /**
-   * server: Path for the resource manager instance (required)
-   */
-  String ARG_RM_ADDR = "--rm";
-
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
index abd2ce7..dbb5a16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
@@ -53,6 +53,7 @@ public class ClientArgs extends CommonArgs {
   // =========================================================
 
   private final ActionAMSuicideArgs actionAMSuicideArgs = new ActionAMSuicideArgs();
+  private final ActionBuildArgs actionBuildArgs = new ActionBuildArgs();
   private final ActionClientArgs actionClientArgs = new ActionClientArgs();
   private final ActionCreateArgs actionCreateArgs = new ActionCreateArgs();
   private final ActionDependencyArgs actionDependencyArgs = new ActionDependencyArgs();
@@ -62,8 +63,6 @@ public class ClientArgs extends CommonArgs {
   private final ActionFlexArgs actionFlexArgs = new ActionFlexArgs();
   private final ActionFreezeArgs actionFreezeArgs = new ActionFreezeArgs();
   private final ActionHelpArgs actionHelpArgs = new ActionHelpArgs();
-  private final ActionInstallPackageArgs actionInstallPackageArgs = new ActionInstallPackageArgs();
-  private final ActionInstallKeytabArgs actionInstallKeytabArgs = new ActionInstallKeytabArgs();
   private final ActionKDiagArgs actionKDiagArgs = new ActionKDiagArgs();
   private final ActionKeytabArgs actionKeytabArgs = new ActionKeytabArgs();
   private final ActionKillContainerArgs actionKillContainerArgs =
@@ -71,7 +70,6 @@ public class ClientArgs extends CommonArgs {
   private final ActionListArgs actionListArgs = new ActionListArgs();
   private final ActionLookupArgs actionLookupArgs = new ActionLookupArgs();
   private final ActionNodesArgs actionNodesArgs = new ActionNodesArgs();
-  private final ActionPackageArgs actionPackageArgs = new ActionPackageArgs();
   private final ActionRegistryArgs actionRegistryArgs = new ActionRegistryArgs();
   private final ActionResolveArgs actionResolveArgs = new ActionResolveArgs();
   private final ActionResourceArgs actionResourceArgs = new ActionResourceArgs();
@@ -95,6 +93,7 @@ public class ClientArgs extends CommonArgs {
 
     addActions(
         actionAMSuicideArgs,
+        actionBuildArgs,
         actionClientArgs,
         actionCreateArgs,
         actionDependencyArgs,
@@ -104,15 +103,12 @@ public class ClientArgs extends CommonArgs {
         actionFlexArgs,
         actionFreezeArgs,
         actionHelpArgs,
-        actionInstallKeytabArgs,
-        actionInstallPackageArgs,
         actionKDiagArgs,
         actionKeytabArgs,
         actionKillContainerArgs,
         actionListArgs,
         actionLookupArgs,
         actionNodesArgs,
-        actionPackageArgs,
         actionRegistryArgs,
         actionResolveArgs,
         actionResourceArgs,
@@ -153,14 +149,12 @@ public class ClientArgs extends CommonArgs {
     return actionAMSuicideArgs;
   }
 
-  public ActionInstallPackageArgs getActionInstallPackageArgs() { return actionInstallPackageArgs; }
+  public ActionBuildArgs getActionBuildArgs() {
+    return actionBuildArgs;
+  }
 
   public ActionClientArgs getActionClientArgs() { return actionClientArgs; }
 
-  public ActionPackageArgs getActionPackageArgs() { return actionPackageArgs; }
-
-  public ActionInstallKeytabArgs getActionInstallKeytabArgs() { return actionInstallKeytabArgs; }
-
   public ActionKDiagArgs getActionKDiagArgs() {
     return actionKDiagArgs;
   }
@@ -250,6 +244,12 @@ public class ClientArgs extends CommonArgs {
       action = ACTION_HELP;
     }
     switch (action) {
+      case ACTION_BUILD:
+        bindCoreAction(actionBuildArgs);
+        //its a builder, so set those actions too
+        buildingActionArgs = actionBuildArgs;
+        break;
+
       case ACTION_CREATE:
         bindCoreAction(actionCreateArgs);
         //its a builder, so set those actions too
@@ -296,14 +296,6 @@ public class ClientArgs extends CommonArgs {
         bindCoreAction(actionHelpArgs);
         break;
 
-      case ACTION_INSTALL_KEYTAB:
-        bindCoreAction(actionInstallKeytabArgs);
-        break;
-
-      case ACTION_INSTALL_PACKAGE:
-        bindCoreAction(actionInstallPackageArgs);
-        break;
-
       case ACTION_KDIAG:
         bindCoreAction(actionKDiagArgs);
         break;
@@ -328,10 +320,6 @@ public class ClientArgs extends CommonArgs {
         bindCoreAction(actionNodesArgs);
         break;
 
-      case ACTION_PACKAGE:
-        bindCoreAction(actionPackageArgs);
-        break;
-
       case ACTION_REGISTRY:
         bindCoreAction(actionRegistryArgs);
         break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
index 5140059..c819b37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
@@ -30,7 +30,7 @@ public class ComponentArgsDelegate extends AbstractArgsDelegate {
   /**
    * This is a listing of the roles to create
    */
-  @Parameter(names = {ARG_COMPONENT,  ARG_COMPONENT_SHORT, ARG_ROLE},
+  @Parameter(names = {ARG_COMPONENT, ARG_COMPONENT_SHORT},
              arity = 2,
              description = "--component <name> <count> e.g. +1 incr by 1, -2 decr by 2, and 3 makes final count 3",
              splitter = DontSplitArguments.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
new file mode 100644
index 0000000..e63bd12
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.Parameter;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Delegate for application and resource options.
+ */
+public class OptionArgsDelegate extends AbstractArgsDelegate {
+
+  /**
+   * Options key value.
+   */
+  @Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
+             description = ARG_OPTION + "<name> <value>",
+             splitter = DontSplitArguments.class)
+  public List<String> optionTuples = new ArrayList<>(0);
+
+
+  /**
+   * All the app component option triples.
+   */
+  @Parameter(names = {ARG_COMP_OPT, ARG_COMP_OPT_SHORT}, arity = 3,
+             description = "Component option " + ARG_COMP_OPT +
+                           " <component> <name> <option>",
+             splitter = DontSplitArguments.class)
+  public List<String> compOptTriples = new ArrayList<>(0);
+
+  public Map<String, String> getOptionsMap() throws
+                                             BadCommandArgumentsException {
+    return convertTupleListToMap(ARG_OPTION, optionTuples);
+  }
+
+  /**
+   * Get the role heap mapping (may be empty, but never null).
+   * @return role heap mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, Map<String, String>> getCompOptionMap()
+      throws BadCommandArgumentsException {
+    return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
index 82e5903..df1a5fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
@@ -39,14 +39,12 @@ public interface SliderActions {
   String ACTION_STOP = "stop";
   String ACTION_HELP = "help";
   String ACTION_INSTALL_KEYTAB = "install-keytab";
-  String ACTION_INSTALL_PACKAGE = "install-package";
   String ACTION_KDIAG = "kdiag";
   String ACTION_KEYTAB = "keytab";
   String ACTION_KILL_CONTAINER = "kill-container";
   String ACTION_LIST = "list";
   String ACTION_LOOKUP = "lookup";
   String ACTION_NODES = "nodes";
-  String ACTION_PACKAGE = "package";
   String ACTION_PREFLIGHT = "preflight";
   String ACTION_RECONFIGURE = "reconfigure";
   String ACTION_REGISTRY = "registry";
@@ -99,12 +97,7 @@ public interface SliderActions {
                         "Start a stopped application";
   String DESCRIBE_ACTION_VERSION =
                         "Print the Slider version information";
-  String DESCRIBE_ACTION_INSTALL_PACKAGE = "Install application package." +
-  		" Deprecated, use '" + ACTION_PACKAGE + " " + ClientArgs.ARG_INSTALL + "'.";
-  String DESCRIBE_ACTION_PACKAGE = "Install/list/delete application packages and list app instances that use the packages";
   String DESCRIBE_ACTION_CLIENT = "Install the application client in the specified directory or obtain a client keystore or truststore";
-  String DESCRIBE_ACTION_INSTALL_KEYTAB = "Install the Kerberos keytab." +
-  		" Deprecated, use '" + ACTION_KEYTAB + " " + ClientArgs.ARG_INSTALL + "'.";
   String DESCRIBE_ACTION_KEYTAB = "Manage a Kerberos keytab file (install, delete, list) in the sub-folder 'keytabs' of the user's Slider base directory";
   String DESCRIBE_ACTION_DIAGNOSTIC = "Diagnose the configuration of the running slider application and slider client";
   String DESCRIBE_ACTION_RESOURCE = "Manage a file (install, delete, list) in the 'resources' sub-folder of the user's Slider base directory";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
index 02eba49..0e94a29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.core.exceptions.BadConfigException;
@@ -613,13 +612,6 @@ public class ConfigHelper {
    * Register anything we consider deprecated
    */
   public static void registerDeprecatedConfigItems() {
-    Configuration.addDeprecation(
-        SliderXmlConfKeys.REGISTRY_ZK_QUORUM,
-        RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
-    Configuration.addDeprecation(
-        SliderXmlConfKeys.REGISTRY_PATH,
-        RegistryConstants.KEY_REGISTRY_ZK_ROOT);
-    
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 9d7c40a..80b70b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -21,14 +21,10 @@ package org.apache.slider.common.tools;
 import com.google.common.base.Preconditions;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
-import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -52,6 +48,8 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.Slider;
 import org.apache.slider.api.RoleKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
@@ -70,7 +68,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -502,6 +499,26 @@ public final class SliderUtils {
   }
 
   /**
+   * Extract the first line of a multi-line string. This is typically used to
+   * prune the stack trace appended to the end of exception messages returned by
+   * YARN in AMRMClientAsync callbacks.
+   *
+   * @param msg
+   *          message string (most likely multi-lines)
+   * @return the first line of a multi-line string or the original string if it
+   *         is a null, empty or single-line
+   */
+  public static String extractFirstLine(String msg) {
+    if (StringUtils.isNotBlank(msg)) {
+      int newlineIndex = msg.indexOf(System.lineSeparator());
+      if (newlineIndex != -1) {
+        msg = msg.substring(0, newlineIndex);
+      }
+    }
+    return msg;
+  }
+
+  /**
    * Create a configuration with Slider-specific tuning.
    * This is done rather than doing custom configs.
    * @return the config
@@ -2046,48 +2063,6 @@ public final class SliderUtils {
     }
   }
 
-  public static InputStream getApplicationResourceInputStream(FileSystem fs,
-      Path appPath,
-      String entry)
-      throws IOException {
-    InputStream is = null;
-    try(FSDataInputStream appStream = fs.open(appPath)) {
-      ZipArchiveInputStream zis = new ZipArchiveInputStream(appStream);
-      ZipArchiveEntry zipEntry;
-      boolean done = false;
-      while (!done && (zipEntry = zis.getNextZipEntry()) != null) {
-        if (entry.equals(zipEntry.getName())) {
-          int size = (int) zipEntry.getSize();
-          if (size != -1) {
-            log.info("Reading {} of size {}", zipEntry.getName(),
-                zipEntry.getSize());
-            byte[] content = new byte[size];
-            int offset = 0;
-            while (offset < size) {
-              offset += zis.read(content, offset, size - offset);
-            }
-            is = new ByteArrayInputStream(content);
-          } else {
-            log.debug("Size unknown. Reading {}", zipEntry.getName());
-            try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
-              while (true) {
-                int byteRead = zis.read();
-                if (byteRead == -1) {
-                  break;
-                }
-                baos.write(byteRead);
-              }
-              is = new ByteArrayInputStream(baos.toByteArray());
-            }
-          }
-          done = true;
-        }
-      }
-    }
-
-    return is;
-  }
-
   /**
    * Check for any needed libraries being present. On Unix none are needed;
    * on windows they must be present
@@ -2525,4 +2500,53 @@ public final class SliderUtils {
     return EnumSet.range(YarnApplicationState.FINISHED,
         YarnApplicationState.KILLED);
   }
+
+  public static final String DAYS = ".days";
+  public static final String HOURS = ".hours";
+  public static final String MINUTES = ".minutes";
+  public static final String SECONDS = ".seconds";
+
+  /**
+   * Get the time range of a set of keys.
+   * @param conf configuration to read properties from
+   * @param basekey base key to which suffix gets applied
+   * @param defDays
+   * @param defHours
+   * @param defMins
+   * @param defSecs
+   * @return the aggregate time range in seconds
+   */
+  public static long getTimeRange(org.apache.slider.api.resource
+      .Configuration conf,
+      String basekey,
+      long defDays,
+      long defHours,
+      long defMins,
+      long defSecs) {
+    Preconditions.checkArgument(basekey != null);
+    long days = conf.getPropertyLong(basekey + DAYS, defDays);
+    long hours = conf.getPropertyLong(basekey + HOURS, defHours);
+
+    long minutes = conf.getPropertyLong(basekey + MINUTES, defMins);
+    long seconds = conf.getPropertyLong(basekey + SECONDS, defSecs);
+    // range check
+    Preconditions.checkState(days >= 0 && hours >= 0 && minutes >= 0
+            && seconds >= 0,
+        "Time range for %s has negative time component %s:%s:%s:%s",
+        basekey, days, hours, minutes, seconds);
+
+    // calculate total time, schedule the reset if expected
+    long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
+    return totalMinutes * 60 + seconds;
+  }
+
+  public static void resolve(Application application) {
+    org.apache.slider.api.resource.Configuration global = application
+        .getConfiguration();
+    for (Component component : application.getComponents()) {
+      mergeMapsIgnoreDuplicateKeys(component.getConfiguration().getProperties(),
+          global.getProperties());
+    }
+    // TODO merge other information to components
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
index 4182459..965ea35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
@@ -43,7 +43,6 @@ import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.Method;
 import java.lang.reflect.InvocationTargetException;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
index ef96c9b..4302530 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
@@ -337,5 +337,12 @@ public class ZKIntegration implements Watcher, Closeable {
     return SVC_SLIDER_USERS + "/" + username;
   }
 
-
+  /**
+   * Blocking enumeration of the clusters registered under the user path.
+   * @return an unordered list of clusters under a user
+   */
+  public List<String> getClusters() throws KeeperException,
+      InterruptedException {
+    return zookeeper.getChildren(userPath, null);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index e0299e7..cb39368 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -18,6 +18,7 @@
 
 package org.apache.slider.providers;
 
+import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Component;
 
 /**
@@ -36,6 +37,14 @@ public final class ProviderRole {
   public final String labelExpression;
   public final Component component;
 
+  public ProviderRole(String name, int id) {
+    this(name,
+        id,
+        PlacementPolicy.DEFAULT,
+        ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
+        ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
+        ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+  }
 
   /**
    * Create a provider role
@@ -59,7 +68,8 @@ public final class ProviderRole {
         policy,
         nodeFailureThreshold,
         placementTimeoutSeconds,
-        labelExpression, null);
+        labelExpression,
+        new Component().name(name).numberOfContainers(0L));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 4aeffa6..7473dab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -97,7 +97,6 @@ import org.apache.slider.common.tools.PortScanner;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.SliderInternalStateException;
@@ -855,7 +854,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     providerService.setAMState(stateForProviders);
 
     // chaos monkey
-//    maybeStartMonkey();
+    maybeStartMonkey();
 
     // if not a secure cluster, extract the username -it will be
     // propagated to workers
@@ -1597,7 +1596,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * @throws SliderException slider problems, including invalid configs
    * @throws IOException IO problems
    */
-  public void flexCluster(Messages.FlexComponentRequestProto request)
+  public void flexCluster(Messages.FlexComponentsRequestProto request)
       throws IOException, SliderException {
     if (request != null) {
       appState.updateComponents(request);
@@ -1619,24 +1618,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     ResetFailureWindow reset = new ResetFailureWindow(rmOperationHandler);
 
-    long days =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".days",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS);
-    long hours =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".hours",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS);
-    long minutes =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".minutes",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES);
-    long seconds =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".seconds",
-            0);
-    Preconditions
-        .checkState(days >= 0 && hours >= 0 && minutes >= 0 && seconds >= 0,
-            "Time range for has negative time component %s:%s:%s:%s", days,
-            hours, minutes, seconds);
-    long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
-    long totalSeconds = totalMinutes * 60 + seconds;
+    long totalSeconds = SliderUtils.getTimeRange(conf,
+        ResourceKeys.CONTAINER_FAILURE_WINDOW,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES,
+        0);
     if (totalSeconds > 0) {
       log.info("Scheduling the failure window reset interval to every {}"
               + " seconds", totalSeconds);
@@ -1810,12 +1797,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
       signalAMComplete(new ActionStopSlider("stop", EXIT_EXCEPTION_THROWN,
           FinalApplicationStatus.FAILED,
-          "AMRMClientAsync.onError() received " + e));
+          SliderUtils.extractFirstLine(e.getLocalizedMessage())));
     } else if (e instanceof InvalidApplicationMasterRequestException) {
       // halt the AM
       LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
       queue(new ActionHalt(EXIT_EXCEPTION_THROWN,
-          "AMRMClientAsync.onError() received " + e));
+          SliderUtils.extractFirstLine(e.getLocalizedMessage())));
     } else {
       // ignore and log
       LOG_YARN.info("Ignoring AMRMClientAsync.onError() received {}", e);
@@ -2040,7 +2027,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   public void onExceptionInThread(Thread thread, Throwable exception) {
     log.error("Exception in {}: {}", thread.getName(), exception, exception);
-    
+
     // if there is a teardown in progress, ignore it
     if (amCompletionFlag.get()) {
       log.info("Ignoring exception: shutdown in progress");
@@ -2052,26 +2039,27 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       signalAMComplete(new ActionStopSlider("stop",
           exitCode,
           FinalApplicationStatus.FAILED,
-          exception.toString()));
+          SliderUtils.extractFirstLine(exception.getLocalizedMessage())));
     }
   }
 
   /**
-   * TODO Start the chaos monkey
+   * TODO Read chaos monkey params from AM configuration rather than app
+   * configuration
    * @return true if it started
    */
   private boolean maybeStartMonkey() {
-//    MapOperations internals = getGlobalInternalOptions();
-    MapOperations internals = new MapOperations();
-    Boolean enabled =
-        internals.getOptionBool(InternalKeys.CHAOS_MONKEY_ENABLED,
-            InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
+    org.apache.slider.api.resource.Configuration configuration =
+        application.getConfiguration();
+    boolean enabled = configuration.getPropertyBool(
+        InternalKeys.CHAOS_MONKEY_ENABLED,
+        InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
     if (!enabled) {
       log.debug("Chaos monkey disabled");
       return false;
     }
     
-    long monkeyInterval = internals.getTimeRange(
+    long monkeyInterval = SliderUtils.getTimeRange(configuration,
         InternalKeys.CHAOS_MONKEY_INTERVAL,
         InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
         InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
@@ -2083,7 +2071,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       return false;
     }
 
-    long monkeyDelay = internals.getTimeRange(
+    long monkeyDelay = SliderUtils.getTimeRange(configuration,
         InternalKeys.CHAOS_MONKEY_DELAY,
         0,
         0,
@@ -2098,10 +2086,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     // configure the targets
     
     // launch failure: special case with explicit failure triggered now
-    int amLaunchFailProbability = internals.getOptionInt(
+    int amLaunchFailProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_LAUNCH_FAILURE,
         0);
-    if (amLaunchFailProbability> 0 && monkey.chaosCheck(amLaunchFailProbability)) {
+    if (amLaunchFailProbability > 0 && monkey.chaosCheck(
+        amLaunchFailProbability)) {
       log.info("Chaos Monkey has triggered AM Launch failure");
       // trigger a failure
       ActionStopSlider stop = new ActionStopSlider("stop",
@@ -2112,12 +2101,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       queue(stop);
     }
     
-    int amKillProbability = internals.getOptionInt(
+    int amKillProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_FAILURE,
         InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_AM_FAILURE);
     monkey.addTarget("AM killer",
         new ChaosKillAM(actionQueues, -1), amKillProbability);
-    int containerKillProbability = internals.getOptionInt(
+    int containerKillProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE,
         InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE);
     monkey.addTarget("Container killer",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
index 220f2ca..a7b94ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
@@ -26,9 +26,9 @@ import java.util.concurrent.TimeUnit;
 
 public class ActionFlexCluster extends AsyncAction {
 
-  final Messages.FlexComponentRequestProto requestProto;
+  final Messages.FlexComponentsRequestProto requestProto;
   public ActionFlexCluster(String name, long delay, TimeUnit timeUnit,
-      Messages.FlexComponentRequestProto requestProto) {
+      Messages.FlexComponentsRequestProto requestProto) {
     super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
     this.requestProto = requestProto;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
index 510ff73..5dcbe9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
@@ -37,13 +37,13 @@ import static org.apache.hadoop.metrics2.lib.Interns.info;
 @Metrics(context = "yarn-native-service")
 public class SliderMetrics implements MetricsSource {
 
-  @Metric("containers pending")
-  public MutableGaugeInt containersPending;
+  @Metric("containers requested")
+  public MutableGaugeInt containersRequested;
   @Metric("anti-affinity containers pending")
   public MutableGaugeInt pendingAAContainers;
-  @Metric("containers pending")
+  @Metric("containers running")
   public MutableGaugeInt containersRunning;
-  @Metric("containers requested")
+  @Metric("containers desired")
   public MutableGaugeInt containersDesired;
   @Metric("containers completed")
   public MutableGaugeInt containersCompleted;
@@ -53,8 +53,12 @@ public class SliderMetrics implements MetricsSource {
   public MutableGaugeInt failedSinceLastThreshold;
   @Metric("containers preempted")
   public MutableGaugeInt containersPreempted;
+  @Metric("containers exceeded limits")
+  public MutableGaugeInt containersLimitsExceeded;
   @Metric("containers surplus")
   public MutableGaugeInt surplusContainers;
+  @Metric("containers failed due to disk failure")
+  public MutableGaugeInt containersDiskFailure;
 
   protected final MetricsRegistry registry;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
index 7830a1e..526ab7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
@@ -70,11 +70,11 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      RpcController controller, Messages.FlexComponentRequestProto request)
+  public Messages.FlexComponentsResponseProto flexComponents(
+      RpcController controller, Messages.FlexComponentsRequestProto request)
       throws ServiceException {
     try {
-      return real.flexComponent(request);
+      return real.flexComponents(request);
     } catch (IOException e) {
       throw wrap(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
index 1902ec1..2e40a9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
@@ -110,10 +110,10 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      Messages.FlexComponentRequestProto request) throws IOException {
+  public Messages.FlexComponentsResponseProto flexComponents(
+      Messages.FlexComponentsRequestProto request) throws IOException {
     try {
-      return endpoint.flexComponent(NULL_CONTROLLER, request);
+      return endpoint.flexComponents(NULL_CONTROLLER, request);
     } catch (ServiceException e) {
       throw convert(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
index eaa0a81..f88d586 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
@@ -196,11 +196,11 @@ public class SliderIPCService extends AbstractService
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      Messages.FlexComponentRequestProto request) throws IOException {
+  public Messages.FlexComponentsResponseProto flexComponents(
+      Messages.FlexComponentsRequestProto request) throws IOException {
     onRpcCall("flex");
     schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));
-    return Messages.FlexComponentResponseProto.newBuilder().build();
+    return Messages.FlexComponentsResponseProto.newBuilder().build();
   }
 
   @Override //SliderClusterProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
index 37c730f..cc19eee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
@@ -16,9 +16,23 @@
  */
 package org.apache.slider.server.appmaster.security;
 
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.SliderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
+
 /**
  * Class keeping code security information
  */
@@ -26,111 +40,122 @@ public class SecurityConfiguration {
 
   protected static final Logger log =
       LoggerFactory.getLogger(SecurityConfiguration.class);
+  private final Configuration configuration;
+  private final Application application;
   private String clusterName;
 
-//  private void validate() throws SliderException {
-//    if (isSecurityEnabled()) {
-//      String principal = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//      if(SliderUtils.isUnset(principal)) {
-//        // if no login identity is available, fail
-//        UserGroupInformation loginUser = null;
-//        try {
-//          loginUser = getLoginUser();
-//        } catch (IOException e) {
-//          throw new SliderException(EXIT_UNAUTHORIZED, e,
-//                                    "No principal configured for the application and "
-//                                    + "exception raised during retrieval of login user. "
-//                                    + "Unable to proceed with application "
-//                                    + "initialization.  Please ensure a value "
-//                                    + "for %s exists in the application "
-//                                    + "configuration or the login issue is addressed",
-//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//        }
-//        if (loginUser == null) {
-//          throw new SliderException(EXIT_UNAUTHORIZED,
-//                                    "No principal configured for the application "
-//                                    + "and no login user found. "
-//                                    + "Unable to proceed with application "
-//                                    + "initialization.  Please ensure a value "
-//                                    + "for %s exists in the application "
-//                                    + "configuration or the login issue is addressed",
-//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//        }
-//      }
-//      // ensure that either local or distributed keytab mechanism is enabled,
-//      // but not both
-//      String keytabFullPath = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM)
-//          .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-//      String keytabName = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM)
-//          .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
-//        throw new SliderException(EXIT_UNAUTHORIZED,
-//                                  "Both a keytab on the cluster host (%s) and a"
-//                                  + " keytab to be retrieved from HDFS (%s) are"
-//                                  + " specified.  Please configure only one keytab"
-//                                  + " retrieval mechanism.",
-//                                  SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
-//                                  SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//
-//      }
-//    }
-//  }
-//
-//  protected UserGroupInformation getLoginUser() throws IOException {
-//    return UserGroupInformation.getLoginUser();
-//  }
-//
-//  public boolean isSecurityEnabled () {
-//    return SliderUtils.isHadoopClusterSecure(configuration);
-//  }
-//
-//  public String getPrincipal () throws IOException {
-//    String principal = instanceDefinition.getAppConfOperations()
-//        .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//    if (SliderUtils.isUnset(principal)) {
-//      principal = UserGroupInformation.getLoginUser().getShortUserName();
-//      log.info("No principal set in the slider configuration.  Will use AM login"
-//               + " identity {} to attempt keytab-based login", principal);
-//    }
-//
-//    return principal;
-//  }
-//
-//  public boolean isKeytabProvided() {
-//    boolean keytabProvided = instanceDefinition.getAppConfOperations()
-//                    .getComponent(SliderKeys.COMPONENT_AM)
-//                    .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
-//                instanceDefinition.getAppConfOperations()
-//                    .getComponent(SliderKeys.COMPONENT_AM).
-//                    get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
-//    return keytabProvided;
-//
-//  }
-//
-//  public File getKeytabFile(AggregateConf instanceDefinition)
-//      throws SliderException, IOException {
-//    //TODO implement this for dash semantic
-//    String keytabFullPath = instanceDefinition.getAppConfOperations()
-//        .getComponent(SliderKeys.COMPONENT_AM)
-//        .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-//    File localKeytabFile;
-//    if (SliderUtils.isUnset(keytabFullPath)) {
-//      // get the keytab
-//      String keytabName = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM).
-//              get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//      log.info("No host keytab file path specified. Will attempt to retrieve"
-//               + " keytab file {} as a local resource for the container",
-//               keytabName);
-//      // download keytab to local, protected directory
-//      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
-//    } else {
-//      log.info("Using host keytab file {} for login", keytabFullPath);
-//      localKeytabFile = new File(keytabFullPath);
-//    }
-//    return localKeytabFile;
-//  }
+  public SecurityConfiguration(Configuration configuration,
+                               Application application,
+                               String clusterName) throws SliderException {
+    Preconditions.checkNotNull(configuration);
+    Preconditions.checkNotNull(application);
+    Preconditions.checkNotNull(clusterName);
+    this.configuration = configuration;
+    this.application = application;
+    this.clusterName = clusterName;
+    validate();
+  }
+
+  private void validate() throws SliderException {
+    if (isSecurityEnabled()) {
+      // TODO use AM configuration rather than app configuration
+      String principal = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+      if(SliderUtils.isUnset(principal)) {
+        // if no login identity is available, fail
+        UserGroupInformation loginUser = null;
+        try {
+          loginUser = getLoginUser();
+        } catch (IOException e) {
+          throw new SliderException(EXIT_UNAUTHORIZED, e,
+              "No principal configured for the application and "
+                  + "exception raised during retrieval of login user. "
+                  + "Unable to proceed with application "
+                  + "initialization.  Please ensure a value "
+                  + "for %s exists in the application "
+                  + "configuration or the login issue is addressed",
+              SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+        }
+        if (loginUser == null) {
+          throw new SliderException(EXIT_UNAUTHORIZED,
+              "No principal configured for the application "
+                  + "and no login user found. "
+                  + "Unable to proceed with application "
+                  + "initialization.  Please ensure a value "
+                  + "for %s exists in the application "
+                  + "configuration or the login issue is addressed",
+              SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+        }
+      }
+      // ensure that either local or distributed keytab mechanism is enabled,
+      // but not both
+      String keytabFullPath = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+      String keytabName = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
+        throw new SliderException(EXIT_UNAUTHORIZED,
+            "Both a keytab on the cluster host (%s) and a"
+                + " keytab to be retrieved from HDFS (%s) are"
+                + " specified.  Please configure only one keytab"
+                + " retrieval mechanism.",
+            SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+            SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+
+      }
+    }
+  }
+
+  protected UserGroupInformation getLoginUser() throws IOException {
+    return UserGroupInformation.getLoginUser();
+  }
+
+  public boolean isSecurityEnabled() {
+    return SliderUtils.isHadoopClusterSecure(configuration);
+  }
+
+  public String getPrincipal() throws IOException {
+    String principal = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+    if (SliderUtils.isUnset(principal)) {
+      principal = UserGroupInformation.getLoginUser().getShortUserName();
+      log.info("No principal set in the slider configuration.  Will use AM " +
+          "login identity {} to attempt keytab-based login", principal);
+    }
+
+    return principal;
+  }
+
+  public boolean isKeytabProvided() {
+    String keytabLocalPath = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+    String keytabName = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+    return StringUtils.isNotBlank(keytabLocalPath)
+        || StringUtils.isNotBlank(keytabName);
+
+  }
+
+  public File getKeytabFile()
+      throws SliderException, IOException {
+    //TODO implement this for dash semantic
+    String keytabFullPath = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+    File localKeytabFile;
+    if (SliderUtils.isUnset(keytabFullPath)) {
+      // get the keytab
+      String keytabName = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      log.info("No host keytab file path specified. Will attempt to retrieve"
+               + " keytab file {} as a local resource for the container",
+               keytabName);
+      // download keytab to local, protected directory
+      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
+    } else {
+      log.info("Using host keytab file {} for login", keytabFullPath);
+      localKeytabFile = new File(keytabFullPath);
+    }
+    return localKeytabFile;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 3d73f3b..43c7ead 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -38,6 +38,7 @@ import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.StatusKeys;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.proto.Messages.ComponentCountProto;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
 import org.apache.slider.api.resource.Component;
@@ -219,7 +220,13 @@ public class AppState {
     return roleStatusMap;
   }
   
+  protected Map<String, ProviderRole> getRoleMap() {
+    return roles;
+  }
 
+  public Map<Integer, ProviderRole> getRolePriorityMap() {
+    return rolePriorityMap;
+  }
 
   private Map<ContainerId, RoleInstance> getStartingContainers() {
     return startingContainers;
@@ -257,6 +264,11 @@ public class AppState {
     return roleHistory;
   }
 
+  @VisibleForTesting
+  public void setRoleHistory(RoleHistory roleHistory) {
+    this.roleHistory = roleHistory;
+  }
+
   /**
    * Get the path used for history files
    * @return the directory used for history files
@@ -306,6 +318,15 @@ public class AppState {
     appMetrics
         .tag("appId", "Application id for service", app.getId());
 
+    org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
+    startTimeThreshold =
+        conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
+            InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
+    failureThreshold = conf.getPropertyInt(CONTAINER_FAILURE_THRESHOLD,
+        DEFAULT_CONTAINER_FAILURE_THRESHOLD);
+    nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
+        DEFAULT_NODE_FAILURE_THRESHOLD);
+
     //build the initial role list
     List<ProviderRole> roleList = new ArrayList<>(binding.roles);
     for (ProviderRole providerRole : roleList) {
@@ -314,6 +335,7 @@ public class AppState {
 
     int priority = 1;
     for (Component component : app.getComponents()) {
+      priority = getNewPriority(priority);
       String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
@@ -324,22 +346,13 @@ public class AppState {
       }
       log.info("Adding component: " + name);
       ProviderRole dynamicRole =
-          createComponent(name, name, component, priority++);
+          createComponent(name, name, component, priority);
       buildRole(dynamicRole);
       roleList.add(dynamicRole);
     }
     //then pick up the requirements
     buildRoleRequirementsFromResources();
 
-    org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
-    startTimeThreshold =
-        conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
-            InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
-    failureThreshold = (int) conf.getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
-        DEFAULT_CONTAINER_FAILURE_THRESHOLD);
-    nodeFailureThreshold = (int) conf.getPropertyLong(NODE_FAILURE_THRESHOLD,
-        DEFAULT_NODE_FAILURE_THRESHOLD);
-
     // set up the role history
     roleHistory = new RoleHistory(roleStatusMap.values(), recordFactory);
     roleHistory.onStart(binding.fs, binding.historyPath);
@@ -359,34 +372,47 @@ public class AppState {
   //TODO WHY do we need to create the component for AM ?
   public ProviderRole createComponent(String name, String group,
       Component component, int priority) throws BadConfigException {
-
     org.apache.slider.api.resource.Configuration conf =
         component.getConfiguration();
     long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY,
         DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS);
     long placementPolicy = conf.getPropertyLong(COMPONENT_PLACEMENT_POLICY,
         PlacementPolicy.DEFAULT);
-    int threshold = (int) conf
-        .getPropertyLong(NODE_FAILURE_THRESHOLD, nodeFailureThreshold);
+    int threshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
+        nodeFailureThreshold);
+    String label = conf.getProperty(YARN_LABEL_EXPRESSION,
+        DEF_YARN_LABEL_EXPRESSION);
     ProviderRole newRole =
         new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
-            placementTimeout, "", component);
+            placementTimeout, label, component);
 
     log.info("Created a new role " + newRole);
     return newRole;
   }
 
-  public synchronized void updateComponents(
-      Messages.FlexComponentRequestProto requestProto)
-      throws BadConfigException {
+  @VisibleForTesting
+  public synchronized List<ProviderRole> updateComponents(Map<String, Long>
+      componentCounts) throws BadConfigException {
     for (Component component : app.getComponents()) {
-      if (component.getName().equals(requestProto.getName())) {
-        component
-            .setNumberOfContainers((long) requestProto.getNumberOfContainers());
+      if (componentCounts.containsKey(component.getName())) {
+        component.setNumberOfContainers(componentCounts.get(component
+            .getName()));
       }
     }
     //TODO update cluster description
-    buildRoleRequirementsFromResources();
+    return buildRoleRequirementsFromResources();
+  }
+
+  public synchronized List<ProviderRole> updateComponents(
+      Messages.FlexComponentsRequestProto requestProto)
+      throws BadConfigException {
+    Map<String, Long> componentCounts = new HashMap<>();
+    for (ComponentCountProto componentCount : requestProto
+        .getComponentsList()) {
+      componentCounts.put(componentCount.getName(), componentCount
+          .getNumberOfContainers());
+    }
+    return updateComponents(componentCounts);
   }
 
   /**
@@ -445,10 +471,8 @@ public class AppState {
     // now the dynamic ones. Iterate through the the cluster spec and
     // add any role status entries not in the role status
 
-    List<RoleStatus> list = new ArrayList<>(getRoleStatusMap().values());
-    for (RoleStatus roleStatus : list) {
-      String name = roleStatus.getName();
-      Component component = roleStatus.getProviderRole().component;
+    for (Component component : app.getComponents()) {
+      String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
       }
@@ -460,10 +484,12 @@ public class AppState {
           groupCount = groupCounts.get(name);
         }
         for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
-          int priority = roleStatus.getPriority();
           // this is a new instance of an existing group
           String newName = String.format("%s%d", name, i);
-          int newPriority = getNewPriority(priority + i - 1);
+          if (roles.containsKey(newName)) {
+            continue;
+          }
+          int newPriority = getNewPriority(i);
           log.info("Adding new role {}", newName);
           ProviderRole dynamicRole =
               createComponent(newName, name, component, newPriority);
@@ -477,11 +503,12 @@ public class AppState {
         }
       } else {
         // this is a new value
-        log.info("Adding new role {}", name);
+        log.info("Adding new role {}, num containers {}", name,
+            component.getNumberOfContainers());
         ProviderRole dynamicRole =
-            createComponent(name, name, component, roleStatus.getPriority());
+            createComponent(name, name, component, getNewPriority(1));
         RoleStatus newRole = buildRole(dynamicRole);
-        incDesiredContainers(roleStatus,
+        incDesiredContainers(newRole,
             component.getNumberOfContainers().intValue());
         log.info("New role {}", newRole);
         if (roleHistory != null) {
@@ -518,7 +545,8 @@ public class AppState {
     if (roleStatusMap.containsKey(priority)) {
       throw new BadConfigException("Duplicate Provider Key: %s and %s",
                                    providerRole,
-                                   roleStatusMap.get(priority));
+                                   roleStatusMap.get(priority)
+                                       .getProviderRole());
     }
     RoleStatus roleStatus = new RoleStatus(providerRole);
     roleStatusMap.put(priority, roleStatus);
@@ -536,6 +564,8 @@ public class AppState {
   private void buildRoleResourceRequirements() {
     for (RoleStatus role : roleStatusMap.values()) {
       role.setResourceRequirements(buildResourceRequirements(role));
+      log.info("Setting resource requirements for {} to {}", role.getName(),
+          role.getResourceRequirements());
     }
   }
   /**
@@ -827,7 +857,6 @@ public class AppState {
    * @return the container request to submit or null if there is none
    */
   private AMRMClient.ContainerRequest createContainerRequest(RoleStatus role) {
-    incPendingContainers(role);
     if (role.isAntiAffinePlacement()) {
       return createAAContainerRequest(role);
     } else {
@@ -857,28 +886,58 @@ public class AppState {
     return request.getIssuedRequest();
   }
 
-  private void incPendingContainers(RoleStatus role) {
-    role.getComponentMetrics().containersPending.incr();
-    appMetrics.containersPending.incr();
+  @VisibleForTesting
+  public void incRequestedContainers(RoleStatus role) {
+    log.info("Incrementing requested containers for {}", role.getName());
+    role.getComponentMetrics().containersRequested.incr();
+    appMetrics.containersRequested.incr();
   }
 
-  private void decPendingContainers(RoleStatus role) {
-    decPendingContainers(role, 1);
+  private void decRequestedContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRequested.decr();
+    appMetrics.containersRequested.decr();
+    log.info("Decrementing requested containers for {} by {} to {}", role
+        .getName(), 1, role.getComponentMetrics().containersRequested.value());
   }
 
-  private void decPendingContainers(RoleStatus role, int n) {
-    role.getComponentMetrics().containersPending.decr(n);;
-    appMetrics.containersPending.decr(n);
+  private int decRequestedContainersToFloor(RoleStatus role, int delta) {
+    int actual = decMetricToFloor(role.getComponentMetrics()
+        .containersRequested, delta);
+    appMetrics.containersRequested.decr(actual);
+    log.info("Decrementing requested containers for {} by {} to {}", role
+            .getName(), actual, role.getComponentMetrics().containersRequested
+        .value());
+    return actual;
   }
 
+  private int decAAPendingToFloor(RoleStatus role, int delta) {
+    int actual = decMetricToFloor(role.getComponentMetrics()
+        .pendingAAContainers, delta);
+    appMetrics.pendingAAContainers.decr(actual);
+    log.info("Decrementing AA pending containers for {} by {} to {}", role
+        .getName(), actual, role.getComponentMetrics().pendingAAContainers
+        .value());
+    return actual;
+  }
 
-  private void incRunningContainers(RoleStatus role) {
-    role.getComponentMetrics().containersRunning.incr();;
+  private int decMetricToFloor(MutableGaugeInt metric, int delta) {
+    int currentValue = metric.value();
+    int decrAmount = delta;
+    if (currentValue - delta < 0) {
+      decrAmount = currentValue;
+    }
+    metric.decr(decrAmount);
+    return decrAmount;
+  }
+
+  @VisibleForTesting
+  public void incRunningContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRunning.incr();
     appMetrics.containersRunning.incr();
   }
 
   private void decRunningContainers(RoleStatus role) {
-    role.getComponentMetrics().containersRunning.decr();;
+    role.getComponentMetrics().containersRunning.decr();
     appMetrics.containersRunning.decr();
   }
 
@@ -902,26 +961,47 @@ public class AppState {
     appMetrics.containersCompleted.incr();
   }
 
-  private void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
-    role.getComponentMetrics().containersFailed.incr();
-    appMetrics.containersFailed.incr();
+  @VisibleForTesting
+  public void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
     switch (outcome) {
     case Preempted:
       appMetrics.containersPreempted.incr();
       role.getComponentMetrics().containersPreempted.incr();
       break;
+    case Disk_failure:
+      appMetrics.containersDiskFailure.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().containersDiskFailure.incr();
+      role.getComponentMetrics().containersFailed.incr();
+      break;
     case Failed:
       appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
+      break;
+    case Failed_limits_exceeded:
+      appMetrics.containersLimitsExceeded.incr();
+      appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().containersLimitsExceeded.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
       break;
     default:
+      appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
       break;
     }
   }
 
   /**
-   * Build up the resource requirements for this role from the
-   * cluster specification, including substituing max allowed values
-   * if the specification asked for it.
+   * Build up the resource requirements for this role from the cluster
+   * specification, including substituting max allowed values if the
+   * specification asked for it (except when
+   * {@link ResourceKeys#YARN_RESOURCE_NORMALIZATION_ENABLED} is set to false).
    * @param role role
    * during normalization
    */
@@ -934,17 +1014,36 @@ public class AppState {
       // TODO why do we need to create the component for AM ?
       return Resource.newInstance(1, 512);
     }
-    int cores = Math.min(containerMaxCores, component.getResource().getCpus());
+    int cores = DEF_YARN_CORES;
+    if (component.getResource() != null && component.getResource().getCpus()
+        != null) {
+      cores = Math.min(containerMaxCores, component.getResource().getCpus());
+    }
     if (cores <= 0) {
       cores = DEF_YARN_CORES;
     }
-    long mem = Math.min(containerMaxMemory,
-        Long.parseLong(component.getResource().getMemory()));
+    long rawMem = DEF_YARN_MEMORY;
+    if (component.getResource() != null && component.getResource().getMemory()
+        != null) {
+      if (YARN_RESOURCE_MAX.equals(component.getResource().getMemory())) {
+        rawMem = containerMaxMemory;
+      } else {
+        rawMem = Long.parseLong(component.getResource().getMemory());
+      }
+    }
+    boolean normalize = component.getConfiguration().getPropertyBool(
+        YARN_RESOURCE_NORMALIZATION_ENABLED, true);
+    if (!normalize) {
+      log.info("Resource normalization: disabled");
+      log.debug("Component {} has RAM={}, vCores={}", name, rawMem, cores);
+      return Resources.createResource(rawMem, cores);
+    }
+    long mem = Math.min(containerMaxMemory, rawMem);
     if (mem <= 0) {
       mem = DEF_YARN_MEMORY;
     }
     Resource capability = Resource.newInstance(mem, cores);
-    log.debug("Component {} has RAM={}, vCores ={}", name, mem, cores);
+    log.debug("Component {} has RAM={}, vCores={}", name, mem, cores);
     Resource normalized = recordFactory.normalize(capability, minResource,
         maxResource);
     if (!Resources.equals(normalized, capability)) {
@@ -1060,7 +1159,7 @@ public class AppState {
       log.debug("Created {} cancel requests", operations.size());
       return new NodeUpdatedOutcome(true, operations);
     }
-    return new NodeUpdatedOutcome(false, new ArrayList<AbstractRMOperation>(0));
+    return new NodeUpdatedOutcome(false, new ArrayList<>(0));
   }
 
   /**
@@ -1203,7 +1302,6 @@ public class AppState {
             message = String.format("Failure %s (%d)", containerId, exitStatus);
           }
           roleStatus.noteFailed(message);
-          incFailedContainers(roleStatus, result.outcome);
           long failed =
               roleStatus.getComponentMetrics().containersFailed.value();
           log.info("Current count of failed role[{}] {} =  {}",
@@ -1409,7 +1507,7 @@ public class AppState {
           role.getName(), failures, threshold);
     }
 
-    if (failures > threshold) {
+    if (threshold > 0 && failures > threshold) {
       throw new TriggerClusterTeardownException(
           SliderExitCodes.EXIT_DEPLOYMENT_FAILED, FinalApplicationStatus.FAILED,
           ErrorStrings.E_UNSTABLE_CLUSTER
@@ -1428,7 +1526,7 @@ public class AppState {
   private int getFailureThresholdForRole(RoleStatus roleStatus) {
     return (int) roleStatus.getProviderRole().component.getConfiguration()
         .getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
-            DEFAULT_CONTAINER_FAILURE_THRESHOLD);
+            failureThreshold);
   }
 
 
@@ -1497,7 +1595,8 @@ public class AppState {
     }
 
     log.info("Reviewing {} : ", role);
-    log.debug("Expected {}, Delta: {}", expected, delta);
+    log.debug("Expected {}, Requested/Running {}, Delta: {}", expected,
+        role.getActualAndRequested(), delta);
     checkFailureThreshold(role);
 
     if (expected < 0 ) {
@@ -1526,7 +1625,7 @@ public class AppState {
               pending--;
               log.info("Starting an anti-affine request sequence for {} nodes; pending={}",
                 delta, pending);
-              addContainerRequest(operations, request);
+              addContainerRequest(operations, request, role);
             } else {
               log.info("No location for anti-affine request");
             }
@@ -1536,12 +1635,12 @@ public class AppState {
         }
         log.info("Setting pending to {}", pending);
         //TODO
-        role.setAAPending((int)pending);
+        role.setAAPending(pending);
       } else {
 
         for (int i = 0; i < delta; i++) {
           //get the role history to select a suitable node, if available
-          addContainerRequest(operations, createContainerRequest(role));
+          addContainerRequest(operations, createContainerRequest(role), role);
         }
       }
     } else if (delta < 0) {
@@ -1552,25 +1651,35 @@ public class AppState {
       long excess = -delta;
 
       // how many requests are outstanding? for AA roles, this includes pending
-      long outstandingRequests = role.getPending() + role.getAAPending();
+      long outstandingRequests = role.getRequested() + role.getAAPending();
       if (outstandingRequests > 0) {
         // outstanding requests.
         int toCancel = (int)Math.min(outstandingRequests, excess);
 
+        int pendingCancelled = 0;
+        if (role.getAAPending() > 0) {
+          pendingCancelled = decAAPendingToFloor(role, toCancel);
+        }
+        int remainingToCancel = toCancel - pendingCancelled;
+
         // Delegate to Role History
-        List<AbstractRMOperation> cancellations = roleHistory.cancelRequestsForRole(role, toCancel);
+        List<AbstractRMOperation> cancellations = roleHistory
+            .cancelRequestsForRole(role, remainingToCancel);
         log.info("Found {} outstanding requests to cancel", cancellations.size());
         operations.addAll(cancellations);
-        if (toCancel != cancellations.size()) {
+        if (remainingToCancel != cancellations.size()) {
           log.error("Tracking of outstanding requests is not in sync with the summary statistics:" +
               " expected to be able to cancel {} requests, but got {}",
-              toCancel, cancellations.size());
+              remainingToCancel, cancellations.size());
         }
-        decPendingContainers(role, toCancel);
-        excess -= toCancel;
+
+        int requestCancelled = decRequestedContainersToFloor(role,
+            remainingToCancel);
+        excess -= pendingCancelled;
+        excess -= requestCancelled;
         assert excess >= 0 : "Attempted to cancel too many requests";
         log.info("Submitted {} cancellations, leaving {} to release",
-            toCancel, excess);
+            pendingCancelled + requestCancelled, excess);
         if (excess == 0) {
           log.info("After cancelling requests, application is now at desired size");
         }
@@ -1645,7 +1754,7 @@ public class AppState {
    * @return true if a request was added
    */
   private boolean addContainerRequest(List<AbstractRMOperation> operations,
-      AMRMClient.ContainerRequest containerAsk) {
+      AMRMClient.ContainerRequest containerAsk, RoleStatus role) {
     if (containerAsk != null) {
       log.info("Container ask is {} and label = {}", containerAsk,
           containerAsk.getNodeLabelExpression());
@@ -1654,6 +1763,7 @@ public class AppState {
         log.warn("Memory requested: {} > max of {}", askMemory, containerMaxMemory);
       }
       operations.add(new ContainerRequestOperation(containerAsk));
+      incRequestedContainers(role);
       return true;
     } else {
       return false;
@@ -1727,6 +1837,8 @@ public class AppState {
       List<Container> allocatedContainers,
       List<ContainerAssignment> assignments,
       List<AbstractRMOperation> operations) {
+    assignments.clear();
+    operations.clear();
     List<Container> ordered = roleHistory.prepareAllocationList(allocatedContainers);
     log.info("onContainersAllocated(): Total containers allocated = {}", ordered.size());
     for (Container container : ordered) {
@@ -1735,13 +1847,13 @@ public class AppState {
       //get the role
       final ContainerId cid = container.getId();
       final RoleStatus role = lookupRoleStatus(container);
-      decPendingContainers(role);
+      decRequestedContainers(role);
 
       //inc allocated count -this may need to be dropped in a moment,
       // but us needed to update the logic below
       MutableGaugeInt containersRunning = role.getComponentMetrics().containersRunning;
-      final long allocated = containersRunning.value();
       incRunningContainers(role);
+      final long allocated = containersRunning.value();
       final long desired = role.getDesired();
 
       final String roleName = role.getName();
@@ -1778,7 +1890,8 @@ public class AppState {
           if (role.getAAPending() > 0) {
             // still an outstanding AA request: need to issue a new one.
             log.info("Asking for next container for AA role {}", roleName);
-            if (!addContainerRequest(operations, createAAContainerRequest(role))) {
+            if (!addContainerRequest(operations, createAAContainerRequest(role),
+                role)) {
               log.info("No capacity in cluster for new requests");
             } else {
               role.decAAPending();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/50] [abbrv] hadoop git commit: YARN-6405. Improve configuring services through REST API. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 43c7ead..9f7b4a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -20,7 +20,13 @@ package org.apache.slider.server.appmaster.state;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -42,6 +48,7 @@ import org.apache.slider.api.proto.Messages.ComponentCountProto;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
 import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.RoleStatistics;
@@ -79,6 +86,7 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.slider.api.ResourceKeys.*;
@@ -99,7 +107,6 @@ public class AppState {
   private final AbstractClusterServices recordFactory;
 
   private final MetricsAndMonitoring metricsAndMonitoring;
-
   /**
    * Flag set to indicate the application is live -this only happens
    * after the buildInstance operation
@@ -108,9 +115,11 @@ public class AppState {
 
   private Application app;
 
+  // priority_id -> RoleStatus
   private final Map<Integer, RoleStatus> roleStatusMap =
     new ConcurrentSkipListMap<>();
 
+  // component_name -> ProviderRole
   private final Map<String, ProviderRole> roles =
     new ConcurrentHashMap<>();
 
@@ -202,6 +211,10 @@ public class AppState {
   private SliderMetrics appMetrics;
 
   private ServiceTimelinePublisher serviceTimelinePublisher;
+
+  // A cache for loading config files from remote such as hdfs
+  public LoadingCache<ConfigFile, Object> configFileCache = null;
+
   /**
    * Create an instance
    * @param recordFactory factory for YARN records
@@ -304,8 +317,6 @@ public class AppState {
   public synchronized void buildInstance(AppStateBindingInfo binding)
       throws BadClusterStateException, BadConfigException, IOException {
     binding.validate();
-
-    log.debug("Building application state");
     containerReleaseSelector = binding.releaseSelector;
 
     // set the cluster specification (once its dependency the client properties
@@ -313,10 +324,8 @@ public class AppState {
     this.app = binding.application;
     appMetrics = SliderMetrics.register(app.getName(),
         "Metrics for service");
-    appMetrics
-        .tag("type", "Metrics type [component or service]", "service");
-    appMetrics
-        .tag("appId", "Application id for service", app.getId());
+    appMetrics.tag("type", "Metrics type [component or service]", "service");
+    appMetrics.tag("appId", "Application id for service", app.getId());
 
     org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
     startTimeThreshold =
@@ -327,12 +336,7 @@ public class AppState {
     nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
         DEFAULT_NODE_FAILURE_THRESHOLD);
 
-    //build the initial role list
-    List<ProviderRole> roleList = new ArrayList<>(binding.roles);
-    for (ProviderRole providerRole : roleList) {
-      buildRole(providerRole);
-    }
-
+    //build the initial component list
     int priority = 1;
     for (Component component : app.getComponents()) {
       priority = getNewPriority(priority);
@@ -340,25 +344,18 @@ public class AppState {
       if (roles.containsKey(name)) {
         continue;
       }
-      if (component.getUniqueComponentSupport()) {
-        log.info("Skipping group " + name + ", as it's unique component");
-        continue;
-      }
       log.info("Adding component: " + name);
-      ProviderRole dynamicRole =
-          createComponent(name, name, component, priority);
-      buildRole(dynamicRole);
-      roleList.add(dynamicRole);
+      createComponent(name, name, component, priority++);
     }
+
     //then pick up the requirements
-    buildRoleRequirementsFromResources();
+//    buildRoleRequirementsFromResources();
 
     // set up the role history
     roleHistory = new RoleHistory(roleStatusMap.values(), recordFactory);
     roleHistory.onStart(binding.fs, binding.historyPath);
     // trigger first node update
     roleHistory.onNodesUpdated(binding.nodeReports);
-
     //rebuild any live containers
     rebuildModelFromRestart(binding.liveContainers);
 
@@ -367,9 +364,39 @@ public class AppState {
     //mark as live
     applicationLive = true;
     app.setState(STARTED);
+    createConfigFileCache(binding.fs);
+  }
+
+  private void createConfigFileCache(final FileSystem fileSystem) {
+    this.configFileCache =
+        CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES)
+            .build(new CacheLoader<ConfigFile, Object>() {
+              @Override public Object load(ConfigFile key) throws Exception {
+                switch (key.getType()) {
+                case HADOOP_XML:
+                  try (FSDataInputStream input = fileSystem
+                      .open(new Path(key.getSrcFile()))) {
+                    org.apache.hadoop.conf.Configuration confRead =
+                        new org.apache.hadoop.conf.Configuration(false);
+                    confRead.addResource(input);
+                    Map<String, String> map = new HashMap<>(confRead.size());
+                    for (Map.Entry<String, String> entry : confRead) {
+                      map.put(entry.getKey(), entry.getValue());
+                    }
+                    return map;
+                  }
+                case TEMPLATE:
+                  try (FSDataInputStream fileInput = fileSystem
+                      .open(new Path(key.getSrcFile()))) {
+                    return IOUtils.toString(fileInput);
+                  }
+                default:
+                  return null;
+                }
+              }
+            });
   }
 
-  //TODO WHY do we need to create the component for AM ?
   public ProviderRole createComponent(String name, String group,
       Component component, int priority) throws BadConfigException {
     org.apache.slider.api.resource.Configuration conf =
@@ -384,26 +411,28 @@ public class AppState {
         DEF_YARN_LABEL_EXPRESSION);
     ProviderRole newRole =
         new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
-            placementTimeout, label, component);
-
+            placementTimeout, label, component, this);
+    buildRole(newRole, component);
     log.info("Created a new role " + newRole);
     return newRole;
   }
 
   @VisibleForTesting
-  public synchronized List<ProviderRole> updateComponents(Map<String, Long>
+  public synchronized void updateComponents(Map<String, Long>
       componentCounts) throws BadConfigException {
     for (Component component : app.getComponents()) {
       if (componentCounts.containsKey(component.getName())) {
-        component.setNumberOfContainers(componentCounts.get(component
-            .getName()));
+        long count = componentCounts.get(component.getName());
+        component.setNumberOfContainers(count);
+        ProviderRole role = roles.get(component.getName());
+        if (role != null && roleStatusMap.get(role.id) != null) {
+          setDesiredContainers(roleStatusMap.get(role.id), (int) count);
+        }
       }
     }
-    //TODO update cluster description
-    return buildRoleRequirementsFromResources();
   }
 
-  public synchronized List<ProviderRole> updateComponents(
+  public synchronized void updateComponents(
       Messages.FlexComponentsRequestProto requestProto)
       throws BadConfigException {
     Map<String, Long> componentCounts = new HashMap<>();
@@ -412,116 +441,119 @@ public class AppState {
       componentCounts.put(componentCount.getName(), componentCount
           .getNumberOfContainers());
     }
-    return updateComponents(componentCounts);
+    updateComponents(componentCounts);
   }
 
   /**
    * build the role requirements from the cluster specification
    * @return a list of any dynamically added provider roles
    */
-  private List<ProviderRole> buildRoleRequirementsFromResources()
-      throws BadConfigException {
-
-    List<ProviderRole> newRoles = new ArrayList<>(0);
-
-    // now update every role's desired count.
-    // if there are no instance values, that role count goes to zero
-    // Add all the existing roles
-    // component name -> number of containers
-    Map<String, Integer> groupCounts = new HashMap<>();
-
-    for (RoleStatus roleStatus : getRoleStatusMap().values()) {
-      if (roleStatus.isExcludeFromFlexing()) {
-        // skip inflexible roles, e.g AM itself
-        continue;
-      }
-      long currentDesired = roleStatus.getDesired();
-      String role = roleStatus.getName();
-      String roleGroup = roleStatus.getGroup();
-      Component component = roleStatus.getProviderRole().component;
-      int desiredInstanceCount = component.getNumberOfContainers().intValue();
-
-      int newDesired = desiredInstanceCount;
-      if (component.getUniqueComponentSupport()) {
-        Integer groupCount = 0;
-        if (groupCounts.containsKey(roleGroup)) {
-          groupCount = groupCounts.get(roleGroup);
-        }
-
-        newDesired = desiredInstanceCount - groupCount;
-
-        if (newDesired > 0) {
-          newDesired = 1;
-          groupCounts.put(roleGroup, groupCount + newDesired);
-        } else {
-          newDesired = 0;
-        }
-      }
-
-      if (newDesired == 0) {
-        log.info("Role {} has 0 instances specified", role);
-      }
-      if (currentDesired != newDesired) {
-        log.info("Role {} flexed from {} to {}", role, currentDesired,
-            newDesired);
-        setDesiredContainers(roleStatus, newDesired);
-      }
-    }
-
-    // now the dynamic ones. Iterate through the the cluster spec and
-    // add any role status entries not in the role status
-
-    for (Component component : app.getComponents()) {
-      String name = component.getName();
-      if (roles.containsKey(name)) {
-        continue;
-      }
-      if (component.getUniqueComponentSupport()) {
-        // THIS NAME IS A GROUP
-        int desiredInstanceCount = component.getNumberOfContainers().intValue();
-        Integer groupCount = 0;
-        if (groupCounts.containsKey(name)) {
-          groupCount = groupCounts.get(name);
-        }
-        for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
-          // this is a new instance of an existing group
-          String newName = String.format("%s%d", name, i);
-          if (roles.containsKey(newName)) {
-            continue;
-          }
-          int newPriority = getNewPriority(i);
-          log.info("Adding new role {}", newName);
-          ProviderRole dynamicRole =
-              createComponent(newName, name, component, newPriority);
-          RoleStatus newRole = buildRole(dynamicRole);
-          incDesiredContainers(newRole);
-          log.info("New role {}", newRole);
-          if (roleHistory != null) {
-            roleHistory.addNewRole(newRole);
-          }
-          newRoles.add(dynamicRole);
-        }
-      } else {
-        // this is a new value
-        log.info("Adding new role {}, num containers {}", name,
-            component.getNumberOfContainers());
-        ProviderRole dynamicRole =
-            createComponent(name, name, component, getNewPriority(1));
-        RoleStatus newRole = buildRole(dynamicRole);
-        incDesiredContainers(newRole,
-            component.getNumberOfContainers().intValue());
-        log.info("New role {}", newRole);
-        if (roleHistory != null) {
-          roleHistory.addNewRole(newRole);
-        }
-        newRoles.add(dynamicRole);
-      }
-    }
-    // and fill in all those roles with their requirements
-    buildRoleResourceRequirements();
 
-    return newRoles;
-  }
+//  private List<ProviderRole> buildRoleRequirementsFromResources()
+//      throws BadConfigException {
+//
+//    List<ProviderRole> newRoles = new ArrayList<>(0);
+//
+//    // now update every role's desired count.
+//    // if there are no instance values, that role count goes to zero
+//    // Add all the existing roles
+//    // component name -> number of containers
+//    Map<String, Integer> groupCounts = new HashMap<>();
+//
+//    for (RoleStatus roleStatus : getRoleStatusMap().values()) {
+//      if (roleStatus.isExcludeFromFlexing()) {
+//        // skip inflexible roles, e.g AM itself
+//        continue;
+//      }
+//      long currentDesired = roleStatus.getDesired();
+//      String role = roleStatus.getName();
+//      String roleGroup = roleStatus.getGroup();
+//      Component component = roleStatus.getProviderRole().component;
+//      int desiredInstanceCount = component.getNumberOfContainers().intValue();
+//
+//      int newDesired = desiredInstanceCount;
+//      if (component.getUniqueComponentSupport()) {
+//        Integer groupCount = 0;
+//        if (groupCounts.containsKey(roleGroup)) {
+//          groupCount = groupCounts.get(roleGroup);
+//        }
+//
+//        newDesired = desiredInstanceCount - groupCount;
+//
+//        if (newDesired > 0) {
+//          newDesired = 1;
+//          groupCounts.put(roleGroup, groupCount + newDesired);
+//        } else {
+//          newDesired = 0;
+//        }
+//      }
+//
+//      if (newDesired == 0) {
+//        log.info("Role {} has 0 instances specified", role);
+//      }
+//      if (currentDesired != newDesired) {
+//        log.info("Role {} flexed from {} to {}", role, currentDesired,
+//            newDesired);
+//        setDesiredContainers(roleStatus, newDesired);
+//      }
+//    }
+//
+//    log.info("Counts per component: " + groupCounts);
+//    // now the dynamic ones. Iterate through the the cluster spec and
+//    // add any role status entries not in the role status
+//
+//    List<RoleStatus> list = new ArrayList<>(getRoleStatusMap().values());
+//    for (RoleStatus roleStatus : list) {
+//      String name = roleStatus.getName();
+//      Component component = roleStatus.getProviderRole().component;
+//      if (roles.containsKey(name)) {
+//        continue;
+//      }
+//      if (component.getUniqueComponentSupport()) {
+//        // THIS NAME IS A GROUP
+//        int desiredInstanceCount = component.getNumberOfContainers().intValue();
+//        Integer groupCount = 0;
+//        if (groupCounts.containsKey(name)) {
+//          groupCount = groupCounts.get(name);
+//        }
+//        log.info("Component " + component.getName() + ", current count = "
+//            + groupCount + ", desired count = " + desiredInstanceCount);
+//        for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
+//          int priority = roleStatus.getPriority();
+//          // this is a new instance of an existing group
+//          String newName = String.format("%s%d", name, i);
+//          int newPriority = getNewPriority(priority + i - 1);
+//          log.info("Adding new role {}", newName);
+//          ProviderRole dynamicRole =
+//              createComponent(newName, name, component, newPriority);
+//          RoleStatus newRole = buildRole(dynamicRole);
+//          incDesiredContainers(newRole);
+//          log.info("New role {}", newRole);
+//          if (roleHistory != null) {
+//            roleHistory.addNewRole(newRole);
+//          }
+//          newRoles.add(dynamicRole);
+//        }
+//      } else {
+//        // this is a new value
+//        log.info("Adding new role {}", name);
+//        ProviderRole dynamicRole =
+//            createComponent(name, name, component, roleStatus.getPriority());
+//        RoleStatus newRole = buildRole(dynamicRole);
+//        incDesiredContainers(roleStatus,
+//            component.getNumberOfContainers().intValue());
+//        log.info("New role {}", newRole);
+//        if (roleHistory != null) {
+//          roleHistory.addNewRole(newRole);
+//        }
+//        newRoles.add(dynamicRole);
+//      }
+//    }
+//    // and fill in all those roles with their requirements
+//    buildRoleResourceRequirements();
+//
+//    return newRoles;
+//  }
 
   private int getNewPriority(int start) {
     if (!rolePriorityMap.containsKey(start)) {
@@ -539,16 +571,20 @@ public class AppState {
    * @return the role status built up
    * @throws BadConfigException if a role of that priority already exists
    */
-  public RoleStatus buildRole(ProviderRole providerRole) throws BadConfigException {
+  public RoleStatus buildRole(ProviderRole providerRole, Component component)
+      throws BadConfigException {
     // build role status map
     int priority = providerRole.id;
     if (roleStatusMap.containsKey(priority)) {
-      throw new BadConfigException("Duplicate Provider Key: %s and %s",
-                                   providerRole,
-                                   roleStatusMap.get(priority)
-                                       .getProviderRole());
+      throw new BadConfigException("Duplicate component priority Key: %s and %s",
+          providerRole, roleStatusMap.get(priority));
     }
     RoleStatus roleStatus = new RoleStatus(providerRole);
+    roleStatus.setResourceRequirements(buildResourceRequirements(roleStatus));
+    long prev = roleStatus.getDesired();
+    setDesiredContainers(roleStatus, component.getNumberOfContainers().intValue());
+    log.info("Set desired containers for component " + component.getName() +
+        " from " + prev + " to " + roleStatus.getDesired());
     roleStatusMap.put(priority, roleStatus);
     String name = providerRole.name;
     roles.put(name, providerRole);
@@ -559,16 +595,6 @@ public class AppState {
   }
 
   /**
-   * Build up the requirements of every resource
-   */
-  private void buildRoleResourceRequirements() {
-    for (RoleStatus role : roleStatusMap.values()) {
-      role.setResourceRequirements(buildResourceRequirements(role));
-      log.info("Setting resource requirements for {} to {}", role.getName(),
-          role.getResourceRequirements());
-    }
-  }
-  /**
    * Look up the status entry of a role or raise an exception
    * @param key role ID
    * @return the status entry
@@ -731,7 +757,7 @@ public class AppState {
   }
 
   /**
-   * Enum all nodes by role. 
+   * Enum all nodes by role.
    * @param role role, or "" for all roles
    * @return a list of nodes, may be empty
    */
@@ -785,7 +811,7 @@ public class AppState {
   }
 
   /**
-   * Build a map of role->nodename->node-info
+   * Build a map of Component_name -> ContainerId -> ClusterNode
    * 
    * @return the map of Role name to list of Cluster Nodes
    */
@@ -850,7 +876,7 @@ public class AppState {
 
   /**
    * Create a container request.
-   * Update internal state, such as the role request count. 
+   * Update internal state, such as the role request count.
    * Anti-Affine: the {@link RoleStatus#outstandingAArequest} is set here.
    * This is where role history information will be used for placement decisions.
    * @param role role
@@ -942,18 +968,9 @@ public class AppState {
   }
 
   private void setDesiredContainers(RoleStatus role, int n) {
+    int delta = n - role.getComponentMetrics().containersDesired.value();
     role.getComponentMetrics().containersDesired.set(n);
-    appMetrics.containersDesired.set(n);
-  }
-
-  private void incDesiredContainers(RoleStatus role) {
-    role.getComponentMetrics().containersDesired.incr();
-    appMetrics.containersDesired.incr();
-  }
-
-  private void incDesiredContainers(RoleStatus role, int n) {
-    role.getComponentMetrics().containersDesired.incr(n);
-    appMetrics.containersDesired.incr(n);
+    appMetrics.containersDesired.incr(delta);
   }
 
   private void incCompletedContainers(RoleStatus role) {
@@ -1001,7 +1018,8 @@ public class AppState {
    * Build up the resource requirements for this role from the cluster
    * specification, including substituting max allowed values if the
    * specification asked for it (except when
-   * {@link ResourceKeys#YARN_RESOURCE_NORMALIZATION_ENABLED} is set to false).
+   * {@link org.apache.slider.api.ResourceKeys#YARN_RESOURCE_NORMALIZATION_ENABLED}
+   * is set to false).
    * @param role role
    * during normalization
    */
@@ -1009,11 +1027,6 @@ public class AppState {
     // Set up resource requirements from role values
     String name = role.getName();
     Component component = role.getProviderRole().component;
-    if (component == null) {
-      // this is for AM container
-      // TODO why do we need to create the component for AM ?
-      return Resource.newInstance(1, 512);
-    }
     int cores = DEF_YARN_CORES;
     if (component.getResource() != null && component.getResource().getCpus()
         != null) {
@@ -1282,10 +1295,13 @@ public class AppState {
       if (roleInstance != null) {
         int roleId = roleInstance.roleId;
         String rolename = roleInstance.role;
-        log.info("Failed container in role[{}] : {}", roleId, rolename);
+        log.info("Failed container in role[{}] : {}", roleId,
+            roleInstance.getCompInstanceName());
         try {
           RoleStatus roleStatus = lookupRoleStatus(roleInstance.roleId);
           decRunningContainers(roleStatus);
+          roleStatus.getProviderRole().failedInstanceName
+              .offer(roleInstance.compInstanceName);
           boolean shortLived = isShortLived(roleInstance);
           String message;
           Container failedContainer = roleInstance.container;
@@ -1571,7 +1587,7 @@ public class AppState {
 
   /**
    * Look at the allocation status of one role, and trigger add/release
-   * actions if the number of desired role instances doesn't equal 
+   * actions if the number of desired role instances doesn't equal
    * (actual + pending).
    * <p>
    * MUST be executed from within a synchronized method
@@ -1584,7 +1600,6 @@ public class AppState {
   @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
   private List<AbstractRMOperation> reviewOneRole(RoleStatus role)
       throws SliderInternalStateException, TriggerClusterTeardownException {
-    log.info("review one role " + role.getName());
     List<AbstractRMOperation> operations = new ArrayList<>();
     long delta;
     long expected;
@@ -1594,9 +1609,7 @@ public class AppState {
       expected = role.getDesired();
     }
 
-    log.info("Reviewing {} : ", role);
-    log.debug("Expected {}, Requested/Running {}, Delta: {}", expected,
-        role.getActualAndRequested(), delta);
+    log.info("Reviewing " + role.getName() + ": " + role.getComponentMetrics());
     checkFailureThreshold(role);
 
     if (expected < 0 ) {
@@ -1729,7 +1742,9 @@ public class AppState {
         for (RoleInstance possible : finalCandidates) {
           log.info("Targeting for release: {}", possible);
           containerReleaseSubmitted(possible.container);
-          operations.add(new ContainerReleaseOperation(possible.getId()));
+          role.getProviderRole().failedInstanceName
+              .offer(possible.compInstanceName);
+          operations.add(new ContainerReleaseOperation(possible.getContainerId()));
         }
       }
 
@@ -1783,7 +1798,7 @@ public class AppState {
     for (RoleInstance role : activeRoleInstances) {
       if (role.container.getId().equals(containerId)) {
         containerReleaseSubmitted(role.container);
-        operations.add(new ContainerReleaseOperation(role.getId()));
+        operations.add(new ContainerReleaseOperation(role.getContainerId()));
       }
     }
 
@@ -1907,17 +1922,6 @@ public class AppState {
   }
 
   /**
-   * Get diagnostics info about containers
-   */
-  public String getContainerDiagnosticInfo() {
-    StringBuilder builder = new StringBuilder();
-    for (RoleStatus roleStatus : getRoleStatusMap().values()) {
-      builder.append(roleStatus).append('\n');
-    }
-    return builder.toString();
-  }
-
-  /**
    * Event handler for the list of active containers on restart.
    * Sets the info key {@link StatusKeys#INFO_CONTAINERS_AM_RESTART}
    * to the size of the list passed down (and does not set it if none were)
@@ -1965,10 +1969,10 @@ public class AppState {
 
     //update app state internal structures and maps
 
+    //TODO recover the component instance name from zk registry ?
     RoleInstance instance = new RoleInstance(container);
     instance.command = roleName;
     instance.role = roleName;
-    instance.group = role.getGroup();
     instance.roleId = roleId;
     instance.environment = new String[0];
     instance.container = container;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
index de52f4e..736dfd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
@@ -19,6 +19,7 @@
 package org.apache.slider.server.appmaster.state;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
 import org.apache.hadoop.registry.client.types.Endpoint;
 import org.apache.hadoop.registry.client.types.ProtocolTypes;
@@ -27,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.providers.ProviderRole;
@@ -42,6 +44,8 @@ public final class RoleInstance implements Cloneable {
 
   public Container container;
   public ProviderRole providerRole;
+  public long componentId = -1;
+  public String compInstanceName = null;
   /**
    * Container ID
    */
@@ -58,7 +62,6 @@ public final class RoleInstance implements Cloneable {
    * Name of the role
    */
   public String role;
-  public String group;
 
   /**
    * Version of the app
@@ -106,7 +109,7 @@ public final class RoleInstance implements Cloneable {
   public String host;
   public String hostURL;
   public ContainerAllocationOutcome placement;
-
+  public Path compInstanceDir;
 
   /**
    * A list of registered endpoints.
@@ -114,10 +117,24 @@ public final class RoleInstance implements Cloneable {
   private List<Endpoint> endpoints =
       new ArrayList<>(2);
 
-  public RoleInstance(ContainerAssignment assignment) {
-    this(assignment.container);
-    placement = assignment.placement;
+  public RoleInstance(Container container, ProviderRole role) {
+    this(container);
+    if (role.componentIdCounter != null) {
+      componentId = role.componentIdCounter.getAndIncrement();
+      compInstanceName = role.name + componentId;
+    } else {
+      compInstanceName = role.name;
+    }
+    this.providerRole = role;
+  }
+
+  public RoleInstance(Container container, ProviderRole role,
+      String compInstanceName) {
+    this(container);
+    this.compInstanceName = compInstanceName;
+    this.providerRole = role;
   }
+
   /**
    * Create an instance to track an allocated container
    * @param container a container which must be non null, and have a non-null Id field.
@@ -136,10 +153,6 @@ public final class RoleInstance implements Cloneable {
       hostURL = "http://" + container.getNodeHttpAddress();
     }
   }
-
-  public ContainerId getId() {
-    return container.getId();
-  }
   
   public NodeId getHost() {
     return container.getNodeId();
@@ -151,6 +164,7 @@ public final class RoleInstance implements Cloneable {
       new StringBuilder("RoleInstance{");
     sb.append("role='").append(role).append('\'');
     sb.append(", id='").append(id).append('\'');
+    sb.append(", instanceName='").append(compInstanceName).append('\'');
     sb.append(", container=").append(SliderUtils.containerToString(container));
     sb.append(", createTime=").append(createTime);
     sb.append(", startTime=").append(startTime);
@@ -170,7 +184,7 @@ public final class RoleInstance implements Cloneable {
   }
 
   public ContainerId getContainerId() {
-    return container != null ? container.getId() : null;
+    return container.getId();
   }
 
   /**
@@ -322,4 +336,8 @@ public final class RoleInstance implements Cloneable {
     }
     return info;
   }
+
+  public String getCompInstanceName() {
+    return compInstanceName;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
index 5051aee..9842481 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
@@ -272,6 +272,7 @@ public final class RoleStatus implements MetricSet {
       // containers -- maybe we need releasing
       //if we are releasing, remove the number that are already released.
       //but never switch to a positive
+      // TODO, WHY is this min operation even needed ??? if delta is negative, it's always < 0 ???
       delta = Math.min(delta, 0);
     }
     return delta;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
index 118ca9d..5bc6dce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
@@ -214,6 +214,7 @@ public interface StateAccessForProviders {
 
   /**
    * Find out about the nodes for specific roles
+   * Component_name -> ContainerId -> ClusterNode
    * @return 
    */
   Map<String, Map<String, ClusterNode>> getRoleClusterNodeMapping();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
index 0f6247d..ac89ed8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
@@ -59,7 +59,7 @@ public interface RestApiErrorMessages {
       ERROR_RESOURCE_CPUS_INVALID_RANGE
           + " for component %s (or at the global level)";
   String ERROR_CONTAINERS_COUNT_INVALID =
-      "Required no of containers not specified";
+      "Invalid no of containers specified";
   String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID =
       ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
index 776ce00..d7c72a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
@@ -20,17 +20,30 @@ package org.apache.slider.util;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.resource.Configuration;
 import org.apache.slider.api.resource.Resource;
 import org.apache.slider.common.tools.SliderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-public class ServiceApiUtil {
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
+public class ServiceApiUtil {
+  private static final Logger log =
+      LoggerFactory.getLogger(ServiceApiUtil.class);
   @VisibleForTesting
-  public static void validateApplicationPostPayload(Application application) {
+  public static void validateApplicationPayload(Application application,
+      FileSystem fs) throws IOException {
     if (StringUtils.isEmpty(application.getName())) {
       throw new IllegalArgumentException(
           RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
@@ -64,11 +77,13 @@ public class ServiceApiUtil {
           application.getArtifact().getType());
 
       // container size
-      if (application.getNumberOfContainers() == null) {
+      if (application.getNumberOfContainers() == null
+          || application.getNumberOfContainers() < 0) {
         throw new IllegalArgumentException(
-            RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID);
+            RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID + ": "
+                + application.getNumberOfContainers());
       }
-
+      validateConfigFile(application.getConfiguration().getFiles(), fs);
       // Since it is a simple app with no components, create a default component
       application.getComponents().add(createDefaultComponent(application));
     } else {
@@ -114,11 +129,13 @@ public class ServiceApiUtil {
         if (comp.getNumberOfContainers() == null) {
           comp.setNumberOfContainers(globalNumberOfContainers);
         }
-        if (comp.getNumberOfContainers() == null) {
+        if (comp.getNumberOfContainers() == null
+            || comp.getNumberOfContainers() < 0) {
           throw new IllegalArgumentException(String.format(
-              RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID,
-              comp.getName()));
+              RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID
+                  + ": " + comp.getNumberOfContainers(), comp.getName()));
         }
+        validateConfigFile(comp.getConfiguration().getFiles(), fs);
       }
     }
 
@@ -128,6 +145,46 @@ public class ServiceApiUtil {
     }
   }
 
+  // 1) Verify the src_file exists and non-empty for template
+  // 2) dest_file is absolute path
+  private static void validateConfigFile(List<ConfigFile> list, FileSystem fs)
+      throws IOException {
+    Set<String> destFileSet = new HashSet<>();
+
+    for (ConfigFile file : list) {
+      if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE) && StringUtils
+          .isEmpty(file.getSrcFile())) {
+        throw new IllegalArgumentException(
+            "Src_file is empty for " + ConfigFile.TypeEnum.TEMPLATE);
+
+      }
+      if (!StringUtils.isEmpty(file.getSrcFile())) {
+        Path p = new Path(file.getSrcFile());
+        if (!fs.exists(p)) {
+          throw new IllegalArgumentException(
+              "Src_file does not exist for config file: " + file
+                  .getSrcFile());
+        }
+      }
+
+      if (StringUtils.isEmpty(file.getDestFile())) {
+        throw new IllegalArgumentException("Dest_file is empty.");
+      }
+      // validate dest_file is absolute
+      if (!Paths.get(file.getDestFile()).isAbsolute()) {
+        throw new IllegalArgumentException(
+            "Dest_file must be absolute path: " + file.getDestFile());
+      }
+
+      if (destFileSet.contains(file.getDestFile())) {
+        throw new IllegalArgumentException(
+            "Duplicated ConfigFile exists: " + file.getDestFile());
+      }
+      destFileSet.add(file.getDestFile());
+    }
+  }
+
+
   private static void validateApplicationResource(Resource resource,
       Component comp, Artifact.TypeEnum artifactType) {
     // Only apps/components of type APPLICATION can skip resource requirement
@@ -200,4 +257,8 @@ public class ServiceApiUtil {
     comp.setLaunchCommand(app.getLaunchCommand());
     return comp;
   }
+
+  public static String $(String s) {
+    return "${" + s +"}";
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
index da2ed0d..9e79821 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
@@ -67,7 +67,7 @@ public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest
     return new MockYarnEngine(8, 1);
   }
 
-  @Test
+  // TODO does not support adding new components dynamically
   public void testDynamicRoleHistory() throws Throwable {
 
     String dynamic = "dynamicRole";
@@ -81,12 +81,8 @@ public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest
         .COMPONENT_PLACEMENT_POLICY, "" + placementPolicy);
     application.getComponents().add(component);
 
-    // write the definitions
-    List<ProviderRole> updates = appState.updateComponents(
+    appState.updateComponents(
         Collections.singletonMap(dynamic, desired));
-    assertEquals(1, updates.size());
-    ProviderRole updatedRole = updates.get(0);
-    assertEquals(updatedRole.placementPolicy, placementPolicy);
 
     // now look at the role map
     assertNotNull(appState.getRoleMap().get(dynamic));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
index 01bf9bd..6d8e963 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
@@ -87,7 +87,7 @@ public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
     createAndStartNodes();
   }
 
-  @Test
+  // TODO does not support adding new components dynamically
   public void testDynamicFlexAddRole() throws Throwable {
     Application application = appState.getClusterStatus();
     Component component = new Component().name("dynamicAdd7")
@@ -96,16 +96,12 @@ public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
     appState.updateComponents(Collections.singletonMap(component.getName(),
         component.getNumberOfContainers()));
     createAndStartNodes();
-    dumpClusterDescription("updated CD", appState.getClusterStatus());
     appState.lookupRoleStatus("dynamicAdd7");
   }
 
   @Test
   public void testDynamicFlexDropRole() throws Throwable {
     appState.updateComponents(Collections.singletonMap("dynamic-6", 0L));
-
-    Application getCD = appState.getClusterStatus();
-    dumpClusterDescription("updated CD", getCD);
     //status is retained for future
     appState.lookupRoleStatus("dynamic-6");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
index eaf5271..54ffe17 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -26,10 +26,15 @@ import org.apache.slider.server.appmaster.model.mock.MockRoles;
 import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
 import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
 import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.junit.Test;
 
 import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
 
 /**
  * Test that if you have more than one role, the right roles are chosen for
@@ -72,40 +77,76 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
     return application;
   }
 
+  public static Map<String, RoleInstance> organize(List<RoleInstance>
+      instances) {
+    Map<String, RoleInstance> map = new TreeMap<>();
+    for (RoleInstance instance : instances) {
+      assertFalse("Multiple role instances for unique name " + instance
+              .compInstanceName, map.containsKey(instance.compInstanceName));
+      System.out.println("Adding to map " + instance.compInstanceName + " for" +
+          instance.role);
+      map.put(instance.compInstanceName, instance);
+    }
+    return map;
+  }
+
+  public static void verifyInstances(List<RoleInstance> instances, String
+      group, String... roles) {
+    assertEquals(roles.length, instances.size());
+    Map<String, RoleInstance> map = organize(instances);
+    int i = 0;
+    for (Entry<String, RoleInstance> entry : map.entrySet()) {
+      assertEquals(roles[i], entry.getKey());
+      RoleInstance instance = entry.getValue();
+      assertEquals(roles[i], instance.compInstanceName);
+      assertEquals(group, instance.role);
+      assertEquals(group, instance.providerRole.name);
+      assertEquals(group, instance.providerRole.group);
+      // TODO remove group from provider role if it continues to be unused
+      i++;
+    }
+  }
+
   @Test
   public void testDynamicFlexDown() throws Throwable {
     createAndStartNodes();
+    List<RoleInstance> instances = appState.cloneOwnedContainerList();
+    verifyInstances(instances, "group1", "group10", "group11");
+
     appState.updateComponents(Collections.singletonMap("group1", 0L));
     createAndStartNodes();
-    RoleStatus roleStatus = appState.lookupRoleStatus("group11");
+    instances = appState.cloneOwnedContainerList();
+    assertEquals(0, instances.size());
+
+    RoleStatus roleStatus = appState.lookupRoleStatus("group1");
     assertEquals(0, roleStatus.getDesired());
     assertEquals(1024L, roleStatus.getResourceRequirements().getMemorySize());
     assertEquals(2, roleStatus.getResourceRequirements().getVirtualCores());
     assertEquals("group1", roleStatus.getGroup());
+
+    // now flex back up
+    appState.updateComponents(Collections.singletonMap("group1", 3L));
+    createAndStartNodes();
+    instances = appState.cloneOwnedContainerList();
+    verifyInstances(instances, "group1", "group10", "group11", "group12");
+    // fails because the names continue at N+1, with group12, group13, group14
   }
 
   @Test
   public void testDynamicFlexUp() throws Throwable {
     createAndStartNodes();
+    List<RoleInstance> instances = appState.cloneOwnedContainerList();
+    verifyInstances(instances, "group1", "group10", "group11");
+
     appState.updateComponents(Collections.singletonMap("group1", 3L));
     createAndStartNodes();
-    RoleStatus group11 = appState.lookupRoleStatus("group11");
-    RoleStatus group12 = appState.lookupRoleStatus("group12");
-    RoleStatus group13 = appState.lookupRoleStatus("group13");
-    assertEquals(1, group11.getDesired());
-    assertEquals(1, group12.getDesired());
-    assertEquals(1, group13.getDesired());
-    assertEquals(1024L, group11.getResourceRequirements().getMemorySize());
-    assertEquals(1024L, group12.getResourceRequirements().getMemorySize());
-    assertEquals(1024L, group13.getResourceRequirements().getMemorySize());
-    assertEquals(2, group11.getResourceRequirements().getVirtualCores());
-    assertEquals(2, group12.getResourceRequirements().getVirtualCores());
-    assertEquals(2, group13.getResourceRequirements().getVirtualCores());
-    assertEquals("group1", group11.getGroup());
-    assertEquals("group1", group12.getGroup());
-    assertEquals("group1", group13.getGroup());
-
-    appState.refreshClusterStatus();
+    instances = appState.cloneOwnedContainerList();
+    verifyInstances(instances, "group1", "group10", "group11", "group12");
+
+    RoleStatus group1 = appState.lookupRoleStatus("group1");
+    assertEquals(3, group1.getDesired());
+    assertEquals(1024L, group1.getResourceRequirements().getMemorySize());
+    assertEquals("group1", group1.getGroup());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
index 046bd83..d382c8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
@@ -27,6 +27,7 @@ import org.apache.slider.server.appmaster.model.mock.MockAppState;
 import org.apache.slider.server.appmaster.model.mock.MockRoles;
 import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
 import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.junit.Test;
 
 import java.util.Collections;
@@ -47,6 +48,11 @@ public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {
     Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
     role0.resource(new org.apache.slider.api.resource.Resource().memory("512")
         .cpus(2));
+    // hack - because role0 is created before the test run
+    RoleStatus role0Status =
+        appState.getRoleStatusMap().get(appState.getRoleMap().get(ROLE0).id);
+    role0Status.setResourceRequirements(
+        appState.buildResourceRequirements(role0Status));
     appState.updateComponents(Collections.singletonMap(role0.getName(),
         role0.getNumberOfContainers()));
     List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
@@ -58,12 +64,17 @@ public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {
     assertEquals(2, requirements.getVirtualCores());
   }
 
+  //TODO replace with resource profile feature in yarn
   @Test
   public void testMaxMemAllocations() throws Throwable {
     // max core allocations no longer supported
     Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
     role0.resource(new org.apache.slider.api.resource.Resource()
         .memory(ResourceKeys.YARN_RESOURCE_MAX).cpus(2));
+    RoleStatus role0Status =
+        appState.getRoleStatusMap().get(appState.getRoleMap().get(ROLE0).id);
+    role0Status.setResourceRequirements(
+        appState.buildResourceRequirements(role0Status));
     appState.updateComponents(Collections.singletonMap(role0.getName(),
         role0.getNumberOfContainers()));
     List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
index eca8401..4352959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
@@ -176,7 +176,14 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
    */
   public RoleInstance roleInstance(ContainerAssignment assigned) {
     Container target = assigned.container;
-    RoleInstance ri = new RoleInstance(target);
+    String failedInstance =
+        assigned.role.getProviderRole().failedInstanceName.poll();
+    RoleInstance ri;
+    if (failedInstance != null) {
+      ri = new RoleInstance(target,  assigned.role.getProviderRole(), failedInstance);
+    } else {
+      ri = new RoleInstance(target, assigned.role.getProviderRole());
+    }
     ri.roleId = assigned.role.getPriority();
     ri.role = assigned.role.getName();
     return ri;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ace79d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
index 112a5ac..4098cf7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
@@ -30,6 +30,7 @@ import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 
@@ -118,7 +119,8 @@ public class MockProviderService implements ProviderService {
   @Override
   public void buildContainerLaunchContext(ContainerLauncher containerLauncher,
       Application application, Container container, ProviderRole providerRole,
-      SliderFileSystem sliderFileSystem) throws IOException, SliderException {
+      SliderFileSystem sliderFileSystem, RoleInstance roleInstance)
+      throws IOException, SliderException {
 
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[21/50] [abbrv] hadoop git commit: YARN-6533. Race condition in writing service record to registry in yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6533. Race condition in writing service record to registry in yarn native services. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ad5432b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ad5432b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ad5432b

Branch: refs/heads/yarn-native-services
Commit: 8ad5432ba9a6803f63e8ebe171372cdb76443ba1
Parents: 0d9690f
Author: Jian He <ji...@apache.org>
Authored: Thu May 11 09:23:54 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../org/apache/slider/providers/ProviderUtils.java  |  6 ++----
 .../providers/docker/DockerProviderService.java     | 11 +++++++++--
 .../slider/server/appmaster/SliderAppMaster.java    | 16 ----------------
 3 files changed, 11 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ad5432b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index beeaa55..d58ecaa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -503,7 +503,8 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
    */
   public void updateServiceRecord(StateAccessForProviders amState,
       YarnRegistryViewForProviders yarnRegistry,
-      String containerId, String roleName, List<String> ip, String hostname) {
+      String containerId, String roleName, List<String> ip, String hostname)
+      throws IOException {
     try {
       RoleInstance role = null;
       if(ip != null && !ip.isEmpty()){
@@ -535,9 +536,6 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
     } catch (NoSuchNodeException e) {
       // ignore - there is nothing to do if we don't find a container
       log.warn("Owned container {} not found - {}", containerId, e);
-    } catch (IOException e) {
-      log.warn("Error updating container {} service record in registry",
-          containerId, e);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ad5432b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index 12c2b04..a48bf83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -146,8 +146,15 @@ public class DockerProviderService extends AbstractService
       return false;
     }
 
-    providerUtils.updateServiceRecord(amState, yarnRegistry,
-        containerId.toString(), instance.role, status.getIPs(), status.getHost());
+    try {
+      providerUtils.updateServiceRecord(amState, yarnRegistry,
+          containerId.toString(), instance.role, status.getIPs(), status.getHost());
+    } catch (IOException e) {
+      // could not write service record to ZK, log and retry
+      log.warn("Error updating container {} service record in registry, " +
+          "retrying", containerId, e);
+      return true;
+    }
     // TODO publish ip and host
     org.apache.slider.api.resource.Container container =
         instance.providerRole.component.getContainer(containerId.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ad5432b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index ae03b45..02c9198 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -1182,24 +1182,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       return false;
     }
     // this is where component registrations  go
-    String cid = RegistryPathUtils.encodeYarnID(id.toString());
-    ServiceRecord record = new ServiceRecord();
-    record.set(YarnRegistryAttributes.YARN_ID, cid);
-
-    record.description = roleInstance.getCompInstanceName();
     log.info("Registering component " + roleInstance.getCompInstanceName()
         + ", containerId = " + id);
-    record.set(YarnRegistryAttributes.YARN_PERSISTENCE,
-        PersistencePolicies.CONTAINER);
-    setUserProvidedServiceRecordAttributes(
-        instance.providerRole.component.getConfiguration(), record);
-    try {
-      yarnRegistryOperations.putComponent(cid, record);
-    } catch (IOException e) {
-      log.warn("Failed to register container {}/{}: {}",
-          id, roleInstance.role, e, e);
-      return false;
-    }
     org.apache.slider.api.resource.Container container =
         new org.apache.slider.api.resource.Container();
     container.setId(id.toString());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/ContractTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/ContractTestUtils.java
new file mode 100644
index 0000000..fc51e31
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/ContractTestUtils.java
@@ -0,0 +1,901 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.internal.AssumptionViolatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Utilities used across test cases to make assertions about filesystems
+ * -assertions which fail with useful information.
+ * This is lifted from Hadoop common Test; that JAR isn't published, so
+ * we have to make do.
+ */
+public class ContractTestUtils extends Assert {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ContractTestUtils.class);
+
+  public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
+
+  // For scale testing, we can repeatedly write small chunk data to generate
+  // a large file.
+  public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
+  public static final int DEFAULT_IO_CHUNK_BUFFER_SIZE = 128;
+  public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
+  public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
+
+  /**
+   * Assert that a property in the property set matches the expected value
+   * @param props property set
+   * @param key property name
+   * @param expected expected value. If null, the property must not be in the set
+   */
+  public static void assertPropertyEquals(Properties props,
+                                          String key,
+                                          String expected) {
+    String val = props.getProperty(key);
+    if (expected == null) {
+      assertNull("Non null property " + key + " = " + val, val);
+    } else {
+      assertEquals("property " + key + " = " + val,
+                          expected,
+                          val);
+    }
+  }
+
+  /**
+   *
+   * Write a file and read it in, validating the result. Optional flags control
+   * whether file overwrite operations should be enabled, and whether the
+   * file should be deleted afterwards.
+   *
+   * If there is a mismatch between what was written and what was expected,
+   * a small range of bytes either side of the first error are logged to aid
+   * diagnosing what problem occurred -whether it was a previous file
+   * or a corrupting of the current file. This assumes that two
+   * sequential runs to the same path use datasets with different character
+   * moduli.
+   *
+   * @param fs filesystem
+   * @param path path to write to
+   * @param len length of data
+   * @param overwrite should the create option allow overwrites?
+   * @param delete should the file be deleted afterwards? -with a verification
+   * that it worked. Deletion is not attempted if an assertion has failed
+   * earlier -it is not in a <code>finally{}</code> block.
+   * @throws IOException IO problems
+   */
+  public static void writeAndRead(FileSystem fs,
+                                  Path path,
+                                  byte[] src,
+                                  int len,
+                                  int blocksize,
+                                  boolean overwrite,
+                                  boolean delete) throws IOException {
+    fs.mkdirs(path.getParent());
+
+    writeDataset(fs, path, src, len, blocksize, overwrite);
+
+    byte[] dest = readDataset(fs, path, len);
+
+    compareByteArrays(src, dest, len);
+
+    if (delete) {
+      rejectRootOperation(path);
+      boolean deleted = fs.delete(path, false);
+      assertTrue("Deleted", deleted);
+      assertPathDoesNotExist(fs, "Cleanup failed", path);
+    }
+  }
+
+  /**
+   * Write a file.
+   * Optional flags control
+   * whether file overwrite operations should be enabled
+   * @param fs filesystem
+   * @param path path to write to
+   * @param len length of data
+   * @param overwrite should the create option allow overwrites?
+   * @throws IOException IO problems
+   */
+  public static void writeDataset(FileSystem fs,
+                                   Path path,
+                                   byte[] src,
+                                   int len,
+                                   int buffersize,
+                                   boolean overwrite) throws IOException {
+    assertTrue(
+      "Not enough data in source array to write " + len + " bytes",
+      src.length >= len);
+    FSDataOutputStream out = fs.create(path,
+                                       overwrite,
+                                       fs.getConf()
+                                         .getInt(IO_FILE_BUFFER_SIZE,
+                                                 4096),
+                                       (short) 1,
+                                       buffersize);
+    out.write(src, 0, len);
+    out.close();
+    assertFileHasLength(fs, path, len);
+  }
+
+  /**
+   * Read the file and convert to a byte dataset.
+   * This implements readfully internally, so that it will read
+   * in the file without ever having to seek()
+   * @param fs filesystem
+   * @param path path to read from
+   * @param len length of data to read
+   * @return the bytes
+   * @throws IOException IO problems
+   */
+  public static byte[] readDataset(FileSystem fs, Path path, int len)
+      throws IOException {
+    FSDataInputStream in = fs.open(path);
+    byte[] dest = new byte[len];
+    int offset =0;
+    int nread = 0;
+    try {
+      while (nread < len) {
+        int nbytes = in.read(dest, offset + nread, len - nread);
+        if (nbytes < 0) {
+          throw new EOFException("End of file reached before reading fully.");
+        }
+        nread += nbytes;
+      }
+    } finally {
+      in.close();
+    }
+    return dest;
+  }
+
+  /**
+   * Read a file, verify its length and contents match the expected array.
+   * @param fs filesystem
+   * @param path path to file
+   * @param original original dataset
+   * @throws IOException IO Problems
+   */
+  public static void verifyFileContents(FileSystem fs,
+                                        Path path,
+                                        byte[] original) throws IOException {
+    FileStatus stat = fs.getFileStatus(path);
+    String statText = stat.toString();
+    // must be a plain file -not a directory or symlink
+    assertTrue("not a file " + statText, stat.isFile());
+    assertEquals("wrong length " + statText, original.length, stat.getLen());
+    byte[] bytes = readDataset(fs, path, original.length);
+    compareByteArrays(original,bytes,original.length);
+  }
+
+  /**
+   * Verify that the read at a specific offset in a stream
+   * matches that expected.
+   * @param stm stream
+   * @param fileContents original file contents
+   * @param seekOff seek offset
+   * @param toRead number of bytes to read
+   * @throws IOException IO problems
+   */
+  public static void verifyRead(FSDataInputStream stm, byte[] fileContents,
+                                int seekOff, int toRead) throws IOException {
+    byte[] out = new byte[toRead];
+    stm.seek(seekOff);
+    stm.readFully(out);
+    // the expected bytes are the slice of the original at the seek offset
+    byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
+                                         seekOff + toRead);
+    compareByteArrays(expected, out,toRead);
+  }
+
+  /**
+   * Assert that the arrays original[0..len) and received[] are equal.
+   * A failure triggers the logging of the bytes near where the first
+   * difference surfaces.
+   * @param original source data
+   * @param received actual
+   * @param len length of bytes to compare
+   */
+  public static void compareByteArrays(byte[] original,
+                                       byte[] received,
+                                       int len) {
+    assertEquals("Number of bytes read != number written",
+                        len, received.length);
+    int errors = 0;
+    // index of the first mismatch, or -1 if none found (camelCase per
+    // Java naming conventions)
+    int firstErrorByte = -1;
+    for (int i = 0; i < len; i++) {
+      if (original[i] != received[i]) {
+        if (errors == 0) {
+          firstErrorByte = i;
+        }
+        errors++;
+      }
+    }
+
+    if (errors > 0) {
+      String message = String.format(" %d errors in file of length %d",
+                                     errors, len);
+      LOG.warn(message);
+      // the range either side of the first error to print
+      // this is a purely arbitrary number, to aid user debugging
+      final int overlap = 10;
+      for (int i = Math.max(0, firstErrorByte - overlap);
+           i < Math.min(firstErrorByte + overlap, len);
+           i++) {
+        byte actual = received[i];
+        byte expected = original[i];
+        String letter = toChar(actual);
+        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+        if (expected != actual) {
+          line = String.format("[%04d] %2x %s -expected %2x %s\n",
+                               i,
+                               actual,
+                               letter,
+                               expected,
+                               toChar(expected));
+        }
+        LOG.warn(line);
+      }
+      fail(message);
+    }
+  }
+
+  /**
+   * Convert a byte to a character for printing. If the
+   * byte value is &lt; 32 -and hence unprintable- the byte is
+   * returned as a two digit hex value
+   * @param b byte
+   * @return the printable character string
+   */
+  public static String toChar(byte b) {
+    if (b >= 0x20) {
+      return Character.toString((char) b);
+    } else {
+      return String.format("%02x", b);
+    }
+  }
+
+  /**
+   * Convert a buffer to a string, character by character; unprintable
+   * characters (including all negative byte values) are rendered as
+   * two-digit hex by {@link #toChar(byte)}.
+   * @param buffer input bytes
+   * @return a string conversion
+   */
+  public static String toChar(byte[] buffer) {
+    StringBuilder builder = new StringBuilder(buffer.length);
+    for (byte b : buffer) {
+      builder.append(toChar(b));
+    }
+    return builder.toString();
+  }
+
+  /**
+   * Convert a string to a byte array by taking the low 8 bits of
+   * every character; characters outside that range are truncated.
+   * @param s source string
+   * @return the byte array
+   */
+  public static byte[] toAsciiByteArray(String s) {
+    char[] chars = s.toCharArray();
+    int len = chars.length;
+    byte[] buffer = new byte[len];
+    for (int i = 0; i < len; i++) {
+      buffer[i] = (byte) (chars[i] & 0xff);
+    }
+    return buffer;
+  }
+
+  /**
+   * Cleanup at the end of a test run.
+   * @param action action triggering the operation (for use in logging)
+   * @param fileSystem filesystem to work with. May be null
+   * @param cleanupPath path to delete as a string
+   */
+  public static void cleanup(String action,
+                             FileSystem fileSystem,
+                             String cleanupPath) {
+    if (fileSystem == null) {
+      return;
+    }
+    // qualify the path against the target filesystem before deleting
+    Path path = new Path(cleanupPath).makeQualified(fileSystem.getUri(),
+        fileSystem.getWorkingDirectory());
+    cleanup(action, fileSystem, path);
+  }
+
+  /**
+   * Cleanup at the end of a test run. Exceptions are caught and
+   * logged, never rethrown.
+   * @param action action triggering the operation (for use in logging)
+   * @param fileSystem filesystem to work with. May be null
+   * @param path path to delete
+   */
+  public static void cleanup(String action, FileSystem fileSystem, Path path) {
+    noteAction(action);
+    try {
+      // recursive delete; root directory deletion is disallowed here
+      rm(fileSystem, path, true, false);
+    } catch (Exception e) {
+      LOG.error("Error deleting in "+ action + " - "  + path + ": " + e, e);
+    }
+  }
+
+  /**
+   * Delete a directory. There's a safety check for operations against the
+   * root directory -these are intercepted and rejected with an IOException
+   * unless the allowRootDelete flag is true
+   * @param fileSystem filesystem to work with. May be null
+   * @param path path to delete
+   * @param recursive flag to enable recursive delete
+   * @param allowRootDelete can the root directory be deleted?
+   * @return the result of FileSystem.delete(), or false if there was
+   * no filesystem or the path did not exist
+   * @throws IOException on any problem.
+   */
+  public static boolean rm(FileSystem fileSystem,
+      Path path,
+      boolean recursive,
+      boolean allowRootDelete) throws
+      IOException {
+    if (fileSystem != null) {
+      rejectRootOperation(path, allowRootDelete);
+      if (fileSystem.exists(path)) {
+        return fileSystem.delete(path, recursive);
+      }
+    }
+    return false;
+
+  }
+
+  /**
+   * Block any operation on the root path. This is a safety check;
+   * the root is detected via Path.isRoot()
+   * @param path path in the filesystem
+   * @param allowRootOperation can the root directory be manipulated?
+   * @throws IOException if the operation was rejected
+   */
+  public static void rejectRootOperation(Path path,
+      boolean allowRootOperation) throws IOException {
+    if (path.isRoot() && !allowRootOperation) {
+      throw new IOException("Root directory operation rejected: " + path);
+    }
+  }
+
+  /**
+   * Block any operation on the root path. This is a safety check;
+   * equivalent to {@code rejectRootOperation(path, false)}
+   * @param path path in the filesystem
+   * @throws IOException if the operation was rejected
+   */
+  public static void rejectRootOperation(Path path) throws IOException {
+    rejectRootOperation(path, false);
+  }
+
+
+  /**
+   * Log an action banner at debug level, to delimit test phases in logs.
+   * @param action action name to log
+   */
+  public static void noteAction(String action) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("==============  "+ action +" =============");
+    }
+  }
+
+  /**
+   * downgrade a failure to a message and a warning, then an
+   * exception for the Junit test runner to record as an assumption
+   * failure -that is, a skipped test rather than a failed one
+   * @param message text message
+   * @param failure what failed
+   * @throws AssumptionViolatedException always
+   */
+  public static void downgrade(String message, Throwable failure) {
+    LOG.warn("Downgrading test " + message, failure);
+    AssumptionViolatedException ave =
+      new AssumptionViolatedException(failure, null);
+    throw ave;
+  }
+
+  /**
+   * report an overridden test as unsupported; delegates to {@link #skip}
+   * @param message message to use in the text
+   * @throws AssumptionViolatedException always
+   */
+  public static void unsupported(String message) {
+    skip(message);
+  }
+
+  /**
+   * report a test has been skipped for some reason; the thrown
+   * AssumptionViolatedException makes JUnit record it as skipped
+   * @param message message to use in the text
+   * @throws AssumptionViolatedException always
+   */
+  public static void skip(String message) {
+    LOG.info("Skipping: {}", message);
+    throw new AssumptionViolatedException(message);
+  }
+
+  /**
+   * Fail with an exception that was received.
+   * @param text text to use in the exception
+   * @param thrown a (possibly null) throwable to init the cause with
+   * @throws AssertionError with the text and throwable -always
+   */
+  public static void fail(String text, Throwable thrown) {
+    // initCause() attaches the throwable while keeping the text message
+    AssertionError e = new AssertionError(text);
+    e.initCause(thrown);
+    throw e;
+  }
+
+  /**
+   * Make an assertion about the length of a file; the full status
+   * entry is included in any failure message.
+   * @param fs filesystem
+   * @param path path of the file
+   * @param expected expected length
+   * @throws IOException on File IO problems
+   */
+  public static void assertFileHasLength(FileSystem fs, Path path,
+                                         int expected) throws IOException {
+    FileStatus status = fs.getFileStatus(path);
+    assertEquals(
+      "Wrong file length of file " + path + " status: " + status,
+      expected,
+      status.getLen());
+  }
+
+  /**
+   * Assert that a path refers to a directory.
+   * @param fs filesystem
+   * @param path path of the directory
+   * @throws IOException on File IO problems, including a
+   * FileNotFoundException if the path does not exist
+   */
+  public static void assertIsDirectory(FileSystem fs,
+                                       Path path) throws IOException {
+    FileStatus fileStatus = fs.getFileStatus(path);
+    assertIsDirectory(fileStatus);
+  }
+
+  /**
+   * Assert that a status entry refers to a directory.
+   * @param fileStatus stats to check
+   */
+  public static void assertIsDirectory(FileStatus fileStatus) {
+    assertTrue("Should be a directory -but isn't: " + fileStatus,
+               fileStatus.isDirectory());
+  }
+
+  /**
+   * Write the text to a file, returning the converted byte array
+   * for use in validating the round trip.
+   * @param fs filesystem
+   * @param path path of file
+   * @param text text to write; a null value creates an empty file
+   * @param overwrite should the operation overwrite any existing file?
+   * @return the bytes written: the ASCII conversion of {@code text},
+   * or an empty array for null text
+   * @throws IOException on IO problems
+   */
+  public static byte[] writeTextFile(FileSystem fs,
+                                   Path path,
+                                   String text,
+                                   boolean overwrite) throws IOException {
+    byte[] bytes = new byte[0];
+    if (text != null) {
+      bytes = toAsciiByteArray(text);
+    }
+    createFile(fs, path, overwrite, bytes);
+    return bytes;
+  }
+
+  /**
+   * Create a file; a null/empty dataset produces a zero-byte file.
+   * @param fs filesystem
+   * @param path       path to write
+   * @param overwrite overwrite flag
+   * @param data source dataset. Can be null
+   * @throws IOException on any problem
+   */
+  public static void createFile(FileSystem fs,
+                                 Path path,
+                                 boolean overwrite,
+                                 byte[] data) throws IOException {
+    FSDataOutputStream stream = fs.create(path, overwrite);
+    if (data != null && data.length > 0) {
+      stream.write(data);
+    }
+    stream.close();
+  }
+
+  /**
+   * Touch a file: create a zero-byte file, overwriting any existing one.
+   * @param fs filesystem
+   * @param path path
+   * @throws IOException IO problems
+   */
+  public static void touch(FileSystem fs,
+                           Path path) throws IOException {
+    createFile(fs, path, true, null);
+  }
+
+  /**
+   * Delete a file/dir and assert that delete() returned true
+   * <i>and</i> that the path no longer exists. This variant rejects
+   * all operations on root directories (it delegates with
+   * allowRootOperations=false)
+   * @param fs filesystem
+   * @param file path to delete
+   * @param recursive flag to enable recursive delete
+   * @throws IOException IO problems
+   */
+  public static void assertDeleted(FileSystem fs,
+                                   Path file,
+                                   boolean recursive) throws IOException {
+    assertDeleted(fs, file, recursive, false);
+  }
+
+  /**
+   * Delete a file/dir and assert that delete() returned true
+   * <i>and</i> that the path no longer exists. This variant permits
+   * operations on root directories when allowRootOperations is set
+   * @param fs filesystem
+   * @param file path to delete
+   * @param recursive flag to enable recursive delete
+   * @param allowRootOperations can the root dir be deleted?
+   * @throws IOException IO problems
+   */
+  public static void assertDeleted(FileSystem fs,
+      Path file,
+      boolean recursive,
+      boolean allowRootOperations) throws IOException {
+    rejectRootOperation(file, allowRootOperations);
+    assertPathExists(fs, "about to be deleted file", file);
+    boolean deleted = fs.delete(file, recursive);
+    // capture the parent listing for the failure message
+    String dir = ls(fs, file.getParent());
+    assertTrue("Delete failed on " + file + ": " + dir, deleted);
+    assertPathDoesNotExist(fs, "Deleted file", file);
+  }
+
+  /**
+   * Read in "length" bytes, convert to an ascii string
+   * @param fs filesystem
+   * @param path path to read
+   * @param length #of bytes to read.
+   * @return the bytes read and converted to a string
+   * @throws IOException IO problems
+   */
+  public static String readBytesToString(FileSystem fs,
+                                  Path path,
+                                  int length) throws IOException {
+    FSDataInputStream in = fs.open(path);
+    try {
+      byte[] buf = new byte[length];
+      // positional read from offset 0, regardless of stream position
+      in.readFully(0, buf);
+      return toChar(buf);
+    } finally {
+      in.close();
+    }
+  }
+
+  /**
+   * Take an array of filestats and convert to a string
+   * (each entry prefixed with a [01]-style counter)
+   * @param stats array of stats
+   * @param separator separator after every entry
+   * @return a stringified set
+   */
+  public static String fileStatsToString(FileStatus[] stats, String separator) {
+    StringBuilder buf = new StringBuilder(stats.length * 128);
+    for (int i = 0; i < stats.length; i++) {
+      buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
+    }
+    return buf.toString();
+  }
+
+  /**
+   * List a directory.
+   * @param fileSystem FS
+   * @param path path
+   * @return a directory listing or failure message
+   * @throws IOException not currently raised: listing failures are
+   * caught and converted into the returned text
+   */
+  public static String ls(FileSystem fileSystem, Path path) throws IOException {
+    if (path == null) {
+      //surfaces when someone calls getParent() on something at the top of the path
+      return "/";
+    }
+    FileStatus[] stats;
+    String pathtext = "ls " + path;
+    try {
+      stats = fileSystem.listStatus(path);
+    } catch (FileNotFoundException e) {
+      return pathtext + " -file not found";
+    } catch (IOException e) {
+      return pathtext + " -failed: " + e;
+    }
+    return dumpStats(pathtext, stats);
+  }
+
+  /**
+   * Convert an array of file status entries to a newline-separated listing.
+   * @param pathname prefix for the listing
+   * @param stats entries to print
+   * @return the combined text
+   */
+  public static String dumpStats(String pathname, FileStatus[] stats) {
+    return pathname + fileStatsToString(stats, "\n");
+  }
+
+  /**
+   * Assert that a file exists and whose {@link FileStatus} entry
+   * declares that this is a file and not a symlink or directory.
+   * @param fileSystem filesystem to resolve path against
+   * @param filename name of the file
+   * @throws IOException IO problems during file operations
+   */
+  public static void assertIsFile(FileSystem fileSystem, Path filename) throws
+                                                                 IOException {
+    assertPathExists(fileSystem, "Expected file", filename);
+    FileStatus status = fileSystem.getFileStatus(filename);
+    assertIsFile(filename, status);
+  }
+
+  /**
+   * Assert that a file exists and whose {@link FileStatus} entry
+   * declares that this is a file and not a symlink or directory.
+   * Both the directory and symlink cases are checked explicitly.
+   * @param filename name of the file
+   * @param status file status
+   */
+  public static void assertIsFile(Path filename, FileStatus status) {
+    String fileInfo = filename + "  " + status;
+    assertFalse("File claims to be a directory " + fileInfo,
+                status.isDirectory());
+    assertFalse("File claims to be a symlink " + fileInfo,
+                       status.isSymlink());
+  }
+
+  /**
+   * Create a dataset for use in the tests; all data is in the range
+   * base to (base+modulo-1) inclusive.
+   * NOTE(review): values are cast to byte, so base+modulo-1 above 127
+   * wraps into negative bytes -confirm callers keep the range small
+   * @param len length of data
+   * @param base base of the data
+   * @param modulo the modulo
+   * @return the newly generated dataset
+   */
+  public static byte[] dataset(int len, int base, int modulo) {
+    byte[] dataset = new byte[len];
+    for (int i = 0; i < len; i++) {
+      dataset[i] = (byte) (base + (i % modulo));
+    }
+    return dataset;
+  }
+
+  /**
+   * Assert that a path exists -but make no assertions as to the
+   * type of that entry
+   *
+   * @param fileSystem filesystem to examine
+   * @param message message to include in the assertion failure message
+   * @param path path in the filesystem
+   * @throws FileNotFoundException raised if the path is missing; the
+   * text includes a listing of the parent directory to aid diagnostics
+   * @throws IOException IO problems
+   */
+  public static void assertPathExists(FileSystem fileSystem, String message,
+                               Path path) throws IOException {
+    if (!fileSystem.exists(path)) {
+      //failure, report it
+      String listing = ls(fileSystem, path.getParent());
+      throw new FileNotFoundException(message + ": not found " + path
+        + " in \"" + path.getParent() + "\" :\n" + listing);
+    }
+  }
+
+  /**
+   * Assert that a path does not exist. Success is signalled by the
+   * FileNotFoundException raised from getFileStatus().
+   *
+   * @param fileSystem filesystem to examine
+   * @param message message to include in the assertion failure message
+   * @param path path in the filesystem
+   * @throws IOException IO problems
+   */
+  public static void assertPathDoesNotExist(FileSystem fileSystem,
+                                            String message,
+                                            Path path) throws IOException {
+    try {
+      FileStatus status = fileSystem.getFileStatus(path);
+      fail(message + ": unexpectedly found " + path + " as  " + status);
+    } catch (FileNotFoundException expected) {
+      //this is expected
+
+    }
+  }
+
+  /**
+   * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry
+   * @param fs filesystem
+   * @param dir directory to scan
+   * @param subdir full path to look for
+   * @throws IOException IO problems
+   */
+  public static void assertListStatusFinds(FileSystem fs,
+                                           Path dir,
+                                           Path subdir) throws IOException {
+    FileStatus[] stats = fs.listStatus(dir);
+    boolean found = false;
+    // accumulate the full listing for a diagnostic failure message
+    StringBuilder builder = new StringBuilder();
+    for (FileStatus stat : stats) {
+      builder.append(stat.toString()).append('\n');
+      if (stat.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+                      + " not found in directory " + dir + ":" + builder,
+                      found);
+  }
+
+  /**
+   * Test for the host being an OSX machine, by checking the
+   * {@code os.name} system property for the substring "OS X"
+   * @return true if the JVM thinks that is running on OSX
+   */
+  public static boolean isOSX() {
+    return System.getProperty("os.name").contains("OS X");
+  }
+
+  /**
+   * compare content of file operations using a double byte array.
+   * On failure the assertion message reports the exact offset of the
+   * first mismatching byte.
+   * @param concat concatenated files
+   * @param bytes bytes
+   */
+  public static void validateFileContent(byte[] concat, byte[][] bytes) {
+    int idx = 0;
+    boolean mismatch = false;
+
+    for (byte[] bb : bytes) {
+      for (byte b : bb) {
+        if (b != concat[idx]) {
+          mismatch = true;
+          break;
+        }
+        // only advance past matching bytes, so on failure idx is the
+        // offset of the mismatch itself (previously it was one past it)
+        idx++;
+      }
+      if (mismatch)
+        break;
+    }
+    assertFalse("File content of file is not as expected at offset " + idx,
+                mismatch);
+  }
+
+  /**
+   * Receives test data from the given input file and checks the size of the
+   * data as well as the pattern inside the received data.
+   *
+   * @param fs FileSystem
+   * @param path Input file to be checked
+   * @param expectedSize the expected size of the data to be read from the
+   *        input file in bytes
+   * @param bufferLen Pattern length
+   * @param modulus   Pattern modulus
+   * @throws IOException
+   *         thrown if an error occurs while reading the data
+   */
+  public static void verifyReceivedData(FileSystem fs, Path path,
+                                      final long expectedSize,
+                                      final int bufferLen,
+                                      final int modulus) throws IOException {
+    final byte[] testBuffer = new byte[bufferLen];
+
+    long totalBytesRead = 0;
+    int nextExpectedNumber = 0;
+    final InputStream inputStream = fs.open(path);
+    try {
+      while (true) {
+        final int bytesRead = inputStream.read(testBuffer);
+        if (bytesRead < 0) {
+          break;
+        }
+
+        totalBytesRead += bytesRead;
+
+        for (int i = 0; i < bytesRead; ++i) {
+          // NOTE(review): testBuffer[i] is a signed byte; a modulus > 127
+          // could never match the expected int -confirm modulus stays small
+          if (testBuffer[i] != nextExpectedNumber) {
+            throw new IOException("Read number " + testBuffer[i]
+                + " but expected " + nextExpectedNumber);
+          }
+
+          ++nextExpectedNumber;
+
+          // the pattern wraps at the modulus
+          if (nextExpectedNumber == modulus) {
+            nextExpectedNumber = 0;
+          }
+        }
+      }
+
+      if (totalBytesRead != expectedSize) {
+        throw new IOException("Expected to read " + expectedSize +
+            " bytes but only received " + totalBytesRead);
+      }
+    } finally {
+      inputStream.close();
+    }
+  }
+
+  /**
+   * Generates test data of the given size according to some specific pattern
+   * and writes it to the provided output file.
+   *
+   * @param fs FileSystem
+   * @param path Test file to be generated
+   * @param size The size of the test data to be generated in bytes
+   * @param bufferLen Pattern length
+   * @param modulus   Pattern modulus
+   * @return the number of bytes written, which equals {@code size}
+   * @throws IOException
+   *         thrown if an error occurs while writing the data
+   */
+  public static long generateTestFile(FileSystem fs, Path path,
+                                      final long size,
+                                      final int bufferLen,
+                                      final int modulus) throws IOException {
+    // fill the buffer with the repeating 0..modulus-1 pattern
+    final byte[] testBuffer = new byte[bufferLen];
+    for (int i = 0; i < testBuffer.length; ++i) {
+      testBuffer[i] = (byte) (i % modulus);
+    }
+
+    final OutputStream outputStream = fs.create(path, false);
+    long bytesWritten = 0;
+    try {
+      while (bytesWritten < size) {
+        final long diff = size - bytesWritten;
+        if (diff < testBuffer.length) {
+          // final partial buffer
+          outputStream.write(testBuffer, 0, (int) diff);
+          bytesWritten += diff;
+        } else {
+          outputStream.write(testBuffer);
+          bytesWritten += testBuffer.length;
+        }
+      }
+
+      return bytesWritten;
+    } finally {
+      outputStream.close();
+    }
+  }
+
+  /**
+   * Creates and reads a file with the given size. The test file is generated
+   * according to a specific pattern so it can be easily verified even if it's
+   * a multi-GB one.
+   * During the read phase the incoming data stream is also checked against
+   * this pattern.
+   *
+   * @param fs FileSystem
+   * @param parent Test file parent dir path
+   * @param fileSize size in bytes of the test file to create
+   * @throws IOException
+   *    thrown if an I/O error occurs while writing or reading the test file
+   */
+  public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
+      throws IOException {
+    // pattern parameters come from the filesystem configuration
+    int testBufferSize = fs.getConf()
+        .getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
+    int modulus = fs.getConf()
+        .getInt(IO_CHUNK_MODULUS_SIZE, DEFAULT_IO_CHUNK_MODULUS_SIZE);
+
+    // random name to avoid collisions between concurrent tests
+    final String objectName = UUID.randomUUID().toString();
+    final Path objectPath = new Path(parent, objectName);
+
+    // Write test file in a specific pattern
+    assertEquals(fileSize,
+        generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
+    assertPathExists(fs, "not created successful", objectPath);
+
+    // Now read the same file back and verify its content
+    try {
+      verifyReceivedData(fs, objectPath, fileSize, testBufferSize, modulus);
+    } finally {
+      // Delete test file
+      fs.delete(objectPath, false);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/KeysForTests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/KeysForTests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/KeysForTests.java
new file mode 100644
index 0000000..cf96407
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/KeysForTests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXMLConfKeysForTesting;
+
+/**
+ * Keys shared across tests.
+ */
+public interface KeysForTests extends SliderKeys, SliderXMLConfKeysForTesting {
+  /**
+   * Username for all clusters, ZK, etc.
+   */
+  String USERNAME = "bigdataborat";
+
+  // wait time passed on test command lines via WAIT_TIME_ARG;
+  // NOTE(review): units not stated here -confirm seconds vs millis at callers
+  int WAIT_TIME = 120;
+  String WAIT_TIME_ARG =  Integer.toString(WAIT_TIME);
+
+  // resource name of the test configuration file
+  String SLIDER_TEST_XML = "slider-test.xml";
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/MicroZKCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/MicroZKCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/MicroZKCluster.java
new file mode 100644
index 0000000..be452f1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/MicroZKCluster.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
+import org.apache.hadoop.registry.server.services.MicroZookeeperService;
+import org.apache.slider.common.tools.SliderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * Test ZK cluster: an in-process Zookeeper service plus a registry
+ * service bound to it.
+ */
+public class MicroZKCluster implements Closeable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MicroZKCluster.class);
+
+  /** Loopback host used for the ZK binding. */
+  public static final String HOSTS = "127.0.0.1";
+  private MicroZookeeperService zkService;
+  private String zkBindingString;
+  private final Configuration conf;
+  private RegistryOperations registryOperations;
+
+  /** Create with a freshly created default configuration. */
+  MicroZKCluster() {
+    this(SliderUtils.createConfiguration());
+  }
+
+  /**
+   * Create with a supplied configuration.
+   * @param conf configuration used to init the ZK and registry services
+   */
+  MicroZKCluster(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Get the ZK connection string; null until createCluster() is called.
+   * @return the binding string
+   */
+  String getZkBindingString() {
+    return zkBindingString;
+  }
+
+  /**
+   * Start the in-process ZK service, then a registry operations
+   * service bound to it.
+   * @param name service name for the ZK instance
+   */
+  void createCluster(String name) {
+    zkService = new MicroZookeeperService(name);
+
+    zkService.init(conf);
+    zkService.start();
+    zkBindingString = zkService.getConnectionString();
+    LOG.info("Created {}", this);
+    registryOperations = new RegistryOperationsService(
+        "registry",
+        zkService);
+    registryOperations.init(conf);
+    registryOperations.start();
+  }
+
+  /**
+   * Stop the registry first, then the ZK service; both checks are
+   * null-safe so close() can be called before createCluster().
+   */
+  @Override
+  public void close() throws IOException {
+    if (registryOperations != null) {
+      registryOperations.stop();
+    }
+    if (zkService != null) {
+      zkService.stop();
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "Micro ZK cluster as " + zkBindingString;
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/Outcome.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/Outcome.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/Outcome.java
new file mode 100644
index 0000000..52875d3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/Outcome.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+/**
+ * Outcome for probes: success, retry or fail.
+ */
+public final class Outcome {
+
+  // descriptive name; stored at construction, not otherwise read here
+  private final String name;
+
+  private Outcome(String name) {
+    this.name = name;
+  }
+
+  /** The probe succeeded. */
+  public static final Outcome SUCCESS = new Outcome(
+      "Success");
+  /** The probe has not succeeded yet: try again. */
+  public static final Outcome RETRY = new Outcome("Retry");
+  /** The probe failed. */
+  public static final Outcome FAIL = new Outcome("Fail");
+
+  /**
+   * Build from a bool, where false is mapped to retry.
+   * @param b boolean
+   * @return an outcome
+   */
+  static Outcome fromBool(boolean b) {
+    return b ? SUCCESS : RETRY;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestBase.java
new file mode 100644
index 0000000..f7da585
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestBase.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.slider.common.SliderXMLConfKeysForTesting;
+import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import java.io.File;
+
+
+/**
+ * Base class for unit tests as well as ones starting mini clusters
+ * -the foundational code and methods.
+ *
+ */
+public abstract class SliderTestBase extends SliderTestUtils {
+
+  /**
+   * Singleton metric registry.
+   */
+  public static final MetricsAndMonitoring METRICS = new MetricsAndMonitoring();
+  // Time, in milliseconds, to allow for webapp startup.
+  public static final int WEB_STARTUP_TIME = 30000;
+
+  // Exposes the current test method name to subclasses.
+  @Rule
+  public TestName methodName = new TestName();
+
+  @BeforeClass
+  public static void nameThread() {
+    Thread.currentThread().setName("JUnit");
+  }
+
+  // Per-test reset: restore the default client class and wipe any
+  // security material left behind by a previous (secure) test run.
+  @Before
+  public void setup() throws Exception {
+    setSliderClientClassName(DEFAULT_SLIDER_CLIENT);
+    FileUtil.fullyDelete(new File(SliderXMLConfKeysForTesting
+        .TEST_SECURITY_DIR));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestUtils.java
new file mode 100644
index 0000000..fc29b5e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/SliderTestUtils.java
@@ -0,0 +1,1065 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Container;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.tools.Duration;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.main.LauncherExitCodes;
+import org.apache.slider.core.main.ServiceLaunchException;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import org.apache.slider.server.services.workflow.ForkedProcessService;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.slider.common.params.Arguments.ARG_OPTION;
+
+/**
+ * Static utils for tests in this package and in other test projects.
+ *
+ * It is designed to work with mini clusters as well as remote ones
+ *
+ * This class is not final and may be extended for test cases.
+ *
+ * Some of these methods are derived from the SwiftUtils and SwiftTestUtils
+ * classes -replicated here so that they are available in Hadoop-2.0 code
+ */
+public class SliderTestUtils extends Assert {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SliderTestUtils.class);
+  public static final String DEFAULT_SLIDER_CLIENT = SliderClient.class
+      .getName();
+  // Client class instantiated by execSliderCommand(); mutable so tests
+  // can substitute a subclass via setSliderClientClassName().
+  private static String sliderClientClassName = DEFAULT_SLIDER_CLIENT;
+
+  public static final Map<String, String> EMPTY_MAP = Collections.emptyMap();
+  public static final Map<String, Integer> EMPTY_INT_MAP = Collections
+      .emptyMap();
+  public static final List<String> EMPTY_LIST = Collections.emptyList();
+
+  // Shared Jackson reader/writer; initialized once in the static block
+  // below.
+  public static final ObjectReader OBJECT_READER;
+  public static final ObjectWriter OBJECT_WRITER;
+
+  // Serializer/deserializer for Application status: maps snake_case
+  // JSON fields onto the camelCase bean properties.
+  public static final JsonSerDeser<Application> JSON_SER_DESER =
+      new JsonSerDeser<>(Application.class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
+
+  static {
+    ObjectMapper mapper = new ObjectMapper();
+    OBJECT_READER = mapper.readerFor(Object.class);
+    OBJECT_WRITER = mapper.writer();
+  }
+
+  /**
+   * Action that returns an object.
+   */
+  public interface Action {
+    Object invoke() throws Exception;
+  }
+
+  /**
+   * Probe that returns an Outcome.
+   */
+  public interface Probe {
+    Outcome invoke(Map args) throws Exception;
+  }
+
+  public static void setSliderClientClassName(String sliderClientClassName) {
+    sliderClientClassName = sliderClientClassName;
+  }
+
+  /**
+   * Log a visually distinct banner around a description, to make test
+   * phases easy to spot in the output.
+   * @param s text to log
+   */
+  public static void describe(String s) {
+    LOG.info("");
+    LOG.info("===============================");
+    LOG.info(s);
+    LOG.info("===============================");
+    LOG.info("");
+  }
+
+  /**
+   * Convert a JSON string to something readable.
+   * @param json JSON text to pretty-print
+   * @return a string for printing
+   */
+  public static String prettyPrintJson(String json) {
+    Gson gson = new GsonBuilder().setPrettyPrinting().create();
+    return gson.toJson(new JsonParser().parse(json));
+  }
+
+  /**
+   * Convert an object to something readable.
+   * @param src object to serialize as JSON
+   * @return a string for printing
+   * @throws JsonProcessingException on serialization problems
+   */
+  public static String prettyPrintAsJson(Object src)
+      throws JsonProcessingException, UnsupportedEncodingException {
+    return new String(OBJECT_WRITER.writeValueAsBytes(src), "UTF8");
+  }
+
+  /**
+   * Skip the test with a message.
+   * @param message message logged and thrown
+   */
+  public static void skip(String message) {
+    LOG.warn("Skipping test: {}", message);
+    Assume.assumeTrue(message, false);
+  }
+
+  /**
+   * Skip the test with a message if condition holds.
+   * @param condition predicate
+   * @param message message logged and thrown
+   */
+  public static void assume(boolean condition, String message) {
+    if (!condition) {
+      skip(message);
+    }
+  }
+
+  /**
+   * Skip a test if not running on Windows.
+   */
+  public static void assumeWindows() {
+    assume(Shell.WINDOWS, "not windows");
+  }
+
+  /**
+   * Skip a test if running on Windows.
+   */
+  public static void assumeNotWindows() {
+    assume(!Shell.WINDOWS, "windows");
+  }
+
+  /**
+   * Skip a test on windows.
+   */
+  public static void skipOnWindows() {
+    assumeNotWindows();
+  }
+
+  /**
+   * Assert that two lists have the same size and equal elements at
+   * every index.
+   * @param left left-hand list
+   * @param right right-hand list
+   */
+  public static void assertListEquals(List left, List right) {
+    String lval = collectionToString(left);
+    String rval = collectionToString(right);
+    String text = "comparing " + lval + " to " + rval;
+    assertEquals(text, left.size(), right.size());
+    for (int i = 0; i < left.size(); i++) {
+      assertEquals(text, left.get(i), right.get(i));
+    }
+  }
+
+  /**
+   * Assert a list has a given length.
+   * @param list list
+   * @param size size to have
+   */
+  public static void assertListLength(List list, int size) {
+    String lval = collectionToString(list);
+    assertEquals(lval, size, list.size());
+  }
+
+  /**
+   * Stringify a collection with [ ] at either end.
+   * @param collection collection
+   * @return string value
+   */
+  public static String collectionToString(List collection) {
+    return "[" + SliderUtils.join(collection, ", ", false) + "]";
+  }
+
+  /**
+   * Assume that a string option is set and not equal to "".
+   * @param conf configuration file
+   * @param key key to look for
+   */
+  public static void assumeStringOptionSet(Configuration conf, String key) {
+    if (SliderUtils.isUnset(conf.getTrimmed(key))) {
+      skip("Configuration key " + key + " not set");
+    }
+  }
+
+  /**
+   * Assert that a string option is set and not equal to "".
+   * @param conf configuration file
+   * @param key key to look for
+   */
+  public static void assertStringOptionSet(Configuration conf, String key) {
+    getRequiredConfOption(conf, key);
+  }
+
+  /**
+   * Assume that a boolean option is set and true.
+   * Unset or false triggers a test skip
+   * @param conf configuration file
+   * @param key key to look for
+   */
+  public static void assumeBoolOptionTrue(Configuration conf, String key) {
+    assumeBoolOption(conf, key, false);
+  }
+
+  /**
+   * Assume that a boolean option is true.
+   * False triggers a test skip
+   * @param conf configuration file
+   * @param key key to look for
+   * @param defval default value if the property is not defined
+   */
+  public static void assumeBoolOption(
+      Configuration conf, String key, boolean defval) {
+    assume(conf.getBoolean(key, defval),
+        "Configuration key " + key + " is false");
+  }
+
+  /**
+   * Get a required config option (trimmed, incidentally).
+   * Test will fail if not set
+   * @param conf configuration
+   * @param key key
+   * @return the string
+   */
+  public static String getRequiredConfOption(Configuration conf, String key) {
+    String val = conf.getTrimmed(key);
+    if (SliderUtils.isUnset(val)) {
+      fail("Missing configuration option " + key);
+    }
+    return val;
+  }
+
+  /**
+   * Fails a test because required behavior has not been implemented.
+   */
+  public static void failNotImplemented() {
+    fail("Not implemented");
+  }
+
+  /**
+   * Assert that any needed native libraries are present. On Unix none
+   * are needed; on windows they must be present
+   */
+  public static void assertNativeLibrariesPresent() {
+    String errorText = SliderUtils.checkForRequiredNativeLibraries();
+    if (SliderUtils.isSet(errorText)) {
+      fail(errorText);
+    }
+  }
+
+  /**
+   * Convert a list of objects to a string array via toString(),
+   * asserting that no element is null.
+   * @param args argument list
+   * @return the converted array
+   */
+  protected static String[] toArray(List<Object> args) {
+    String[] converted = new String[args.size()];
+    for (int i = 0; i < args.size(); i++) {
+      Object elt = args.get(i);
+      assertNotNull(args.get(i));
+      converted[i] = elt.toString();
+    }
+    return converted;
+  }
+
+  public static void waitWhileClusterLive(SliderClient client, int timeout)
+      throws IOException, YarnException {
+    Duration duration = new Duration(timeout);
+    duration.start();
+    while (client.actionExists(client.getDeployedClusterName(), true) ==
+        LauncherExitCodes.EXIT_SUCCESS && !duration.getLimitExceeded()) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+      }
+    }
+    if (duration.getLimitExceeded()) {
+      fail("Cluster " + client.getDeployedClusterName() + " still live after " +
+          timeout + " ms");
+    }
+  }
+
+  public static void waitUntilClusterLive(SliderClient client, int timeout)
+      throws IOException, YarnException {
+    Duration duration = new Duration(timeout);
+    duration.start();
+    while (LauncherExitCodes.EXIT_SUCCESS != client.actionExists(
+        client.getDeployedClusterName(), true) &&
+           !duration.getLimitExceeded()) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+      }
+    }
+    if (duration.getLimitExceeded()) {
+      fail("Cluster " + client.getDeployedClusterName() + " not live after " +
+          timeout + " ms");
+    }
+  }
+
+  /**
+   * Describe the text then log the JSON form of the application status.
+   * @param text header text
+   * @param status application status to dump
+   * @throws IOException on serialization problems
+   */
+  public static void dumpClusterDescription(
+      String text,
+      Application status) throws IOException {
+    describe(text);
+    LOG.info(JSON_SER_DESER.toJson(status));
+  }
+
+  /**
+   * Assert that a service operation succeeded.
+   * @param service service
+   */
+  public static void assertSucceeded(ServiceLauncher service) {
+    assertEquals(0, service.getServiceExitCode());
+  }
+
+  /**
+   * Assert the expected number of live containers for a component,
+   * logging the full application report on a mismatch.
+   * @param application application status
+   * @param component component name
+   * @param expected expected container count
+   */
+  public static void assertContainersLive(Application application,
+      String component, int expected) {
+    LOG.info("Asserting component {} expected count {}", component, expected);
+    int actual = extractLiveContainerCount(application, component);
+    if (expected != actual) {
+      LOG.warn("{} actual={}, expected {} in \n{}\n", component, actual,
+          expected, application);
+    }
+    assertEquals(expected, actual);
+  }
+
+  /**
+   * Robust extraction of live container count.
+   * @param application status
+   * @param component component to resolve
+   * @return the number of containers live.
+   */
+  public static int extractLiveContainerCount(
+      Application application,
+      String component) {
+    int actual = 0;
+    if (application.getContainers() != null) {
+      for (Container container : application.getContainers()) {
+        if (container.getComponentName().equals(component)) {
+          actual++;
+        }
+      }
+    }
+    return actual;
+  }
+
+  /**
+   * Exec a set of commands, wait a few seconds for it to finish.
+   * @param status expected exit code of the process
+   * @param commands commands to execute
+   * @return the process
+   */
+  public static ForkedProcessService exec(int status, List<String> commands)
+      throws IOException, TimeoutException {
+    ForkedProcessService process = exec(commands);
+
+    Integer exitCode = process.getExitCode();
+    assertNotNull(exitCode);
+    assertEquals(status, exitCode.intValue());
+    return process;
+  }
+
+  /**
+   * Exec a set of commands, wait a few seconds for it to finish.
+   * @param commands commands to execute; the first entry is the command name
+   * @return the stopped process service
+   */
+  public static ForkedProcessService exec(List<String> commands)
+      throws IOException, TimeoutException {
+    ForkedProcessService process;
+    process = new ForkedProcessService(
+        commands.get(0),
+        EMPTY_MAP,
+        commands);
+    process.init(new Configuration());
+    process.start();
+    int timeoutMillis = 5000;
+    if (!process.waitForServiceToStop(timeoutMillis)) {
+      throw new TimeoutException(
+          "Process did not stop in " + timeoutMillis + "mS");
+    }
+    return process;
+  }
+
+  /**
+   * Determine whether an application exists. Run the commands and if the
+   * operation fails with a FileNotFoundException, then
+   * this method returns false.
+   * <p>
+   *   Run something harmless like a -version command, something
+   *   which must return 0
+   *
+   * @param commands commands to run
+   * @return true if the command sequence succeeded
+   * false if they failed with no file
+   * @throws Exception on any other failure cause
+   */
+  public static boolean doesAppExist(List<String> commands)
+      throws IOException, TimeoutException {
+    try {
+      exec(0, commands);
+      return true;
+    } catch (ServiceStateException e) {
+      if (!(e.getCause() instanceof FileNotFoundException)) {
+        throw e;
+      }
+      return false;
+    }
+  }
+
+  /**
+   * Locate an executable on the path.
+   * @param exe executable name. If it is an absolute path which
+   * exists then it will returned direct
+   * @return the path to an exe or null for no match
+   */
+  public static File locateExecutable(String exe) {
+    File exeNameAsPath = new File(exe).getAbsoluteFile();
+    if (exeNameAsPath.exists()) {
+      return exeNameAsPath;
+    }
+
+    File exepath = null;
+    String path = extractPath();
+    String[] dirs = path.split(System.getProperty("path.separator"));
+    for (String dirname : dirs) {
+      File dir = new File(dirname);
+
+      File possible = new File(dir, exe);
+      if (possible.exists()) {
+        exepath = possible;
+      }
+    }
+    return exepath;
+  }
+
+  /**
+   * Lookup the PATH env var.
+   * @return the path or null
+   */
+  public static String extractPath() {
+    return extractEnvVar("PATH");
+  }
+
+  /**
+   * Find an environment variable. Uses case independent checking for
+   * the benefit of windows.
+   * Will fail if the var is not found.
+   * @param var path variable <i>in upper case</i>
+   * @return the env var
+   */
+  public static String extractEnvVar(String var) {
+    String realkey = "";
+
+    // Case-insensitive scan; NOTE(review): if several keys differ only
+    // in case (possible on Unix), the *last* match wins — confirm this
+    // is the intended tie-break.
+    for (String it : System.getenv().keySet()) {
+      if (it.toUpperCase(Locale.ENGLISH).equals(var)) {
+        realkey = it;
+      }
+    }
+
+    if (SliderUtils.isUnset(realkey)) {
+      fail("No environment variable " + var + " found");
+    }
+    String val = System.getenv(realkey);
+
+    LOG.info("{} = {}", realkey, val);
+    return val;
+  }
+
+  /**
+   * Create a temp JSON file. After coming up with the name, the file
+   * is deleted
+   * @return the filename
+   */
+  public static  File createTempJsonFile() throws IOException {
+    return tmpFile(".json");
+  }
+
+  /**
+   * Create a temp file with the specific name. It's deleted after creation,
+   * to avoid  "file exists exceptions"
+   * @param suffix suffix, e.g. ".txt"
+   * @return a path to a file which may be created
+   */
+  public static File tmpFile(String suffix) throws IOException {
+    File reportFile = File.createTempFile(
+        "temp",
+        suffix,
+        new File("target"));
+    // The delete() result is deliberately ignored: only the unique
+    // name is needed.
+    reportFile.delete();
+    return reportFile;
+  }
+
+  /**
+   * Execute a closure, assert it fails with a given exit code and text.
+   * Exceptions other than ServiceLaunchException propagate unchecked.
+   * @param exitCode exit code
+   * @param text text (can be "")
+   * @param action action
+   */
+  public void  assertFailsWithException(int exitCode,
+      String text,
+      Action action) throws Exception {
+    try {
+      action.invoke();
+      fail("Operation was expected to fail —but it succeeded");
+    } catch (ServiceLaunchException e) {
+      assertExceptionDetails(e, exitCode, text);
+    }
+  }
+
+  /**
+   * Execute a closure, assert it fails with a given exception class and
+   * text; a non-matching exception is rethrown.
+   * @param clazz expected exception class
+   * @param text text (can be "")
+   * @param action action
+   */
+  public void assertFailsWithExceptionClass(Class clazz,
+      String text,
+      Action action) throws Exception {
+    try {
+      action.invoke();
+      fail("Operation was expected to fail —but it succeeded");
+    } catch (Exception e) {
+      assertExceptionDetails(e, clazz, text);
+    }
+  }
+
+  /**
+   * Make an assertion about the exit code of an exception, with no
+   * text check.
+   * @param ex exception
+   * @param exitCode expected exit code
+   */
+  public static void assertExceptionDetails(
+      ServiceLaunchException ex,
+      int exitCode) {
+    assertExceptionDetails(ex, exitCode, null);
+  }
+
+  /**
+   * Make an assertion about the exit code of an exception.
+   * @param ex exception
+   * @param exitCode exit code
+   * @param text error text to look for in the exception
+   */
+  public static void assertExceptionDetails(
+      ServiceLaunchException ex,
+      int exitCode,
+      String text) {
+    if (exitCode != ex.getExitCode()) {
+      String message = String.format("Wrong exit code, expected %d but" +
+              " got %d in %s", exitCode, ex.getExitCode(), ex);
+      LOG.warn(message, ex);
+      throw new AssertionError(message, ex);
+    }
+    if (SliderUtils.isSet(text)) {
+      if (!(ex.toString().contains(text))) {
+        String message = String.format("String match for \"%s\"failed in %s",
+            text, ex);
+        LOG.warn(message, ex);
+        throw new AssertionError(message, ex);
+      }
+    }
+  }
+
+  /**
+   * Make an assertion about the class of an exception; rethrows the
+   * original exception when the class or the text does not match.
+   * @param ex exception
+   * @param clazz expected exception class
+   * @param text error text to look for in the exception
+   */
+  static void assertExceptionDetails(
+      Exception ex,
+      Class clazz,
+      String text) throws Exception {
+    if (ex.getClass() != clazz) {
+      throw ex;
+    }
+    if (SliderUtils.isSet(text) && !(ex.toString().contains(text))) {
+      throw ex;
+    }
+  }
+
+  /**
+   * Launch the slider client with the specific args; no validation
+   * of return code takes place.
+   * @param conf configuration
+   * @param args arg list
+   * @return the launcher
+   */
+  protected static ServiceLauncher<SliderClient> execSliderCommand(
+      Configuration conf,
+      List args) throws Throwable {
+    ServiceLauncher<SliderClient> serviceLauncher =
+        new ServiceLauncher<>(sliderClientClassName);
+
+    LOG.debug("slider {}", SliderUtils.join(args, " ", false));
+    serviceLauncher.launchService(conf,
+        toArray(args),
+        false);
+    return serviceLauncher;
+  }
+
+  /**
+   * Launch a slider command to a given exit code.
+   * Most failures will trigger exceptions; this is for the exit code of the
+   * runService() call.
+   * @param exitCode desired exit code
+   * @param conf configuration
+   * @param args arg list
+   * @return the launcher
+   */
+  protected static ServiceLauncher<SliderClient> execSliderCommand(
+      int exitCode,
+      Configuration conf,
+      List args) throws Throwable {
+    ServiceLauncher<SliderClient> serviceLauncher = execSliderCommand(conf,
+        args);
+    assertEquals(exitCode, serviceLauncher.getServiceExitCode());
+    return serviceLauncher;
+  }
+
+  /**
+   * Launch an arbitrary service class through the service launcher.
+   * @param serviceClass class of the service to run
+   * @param conf configuration
+   * @param args arg list
+   * @return the launcher
+   */
+  public static ServiceLauncher launch(Class serviceClass,
+      Configuration conf,
+      List<Object> args) throws
+      Throwable {
+    ServiceLauncher serviceLauncher =
+        new ServiceLauncher(serviceClass.getName());
+
+    String joinedArgs = SliderUtils.join(args, " ", false);
+    LOG.debug("slider {}", joinedArgs);
+
+    serviceLauncher.launchService(conf,
+        toArray(args),
+        false);
+    return serviceLauncher;
+  }
+
+  /**
+   * Launch a service, expecting it to throw; returns the caught
+   * throwable when its text matches, rethrows it otherwise.
+   * @param serviceClass class of the service to run
+   * @param conf configuration
+   * @param expectedText text which must appear in the throwable (may be "")
+   * @param args arg list
+   * @return the caught throwable
+   */
+  public static Throwable launchExpectingException(Class serviceClass,
+      Configuration conf,
+      String expectedText,
+      List args)
+      throws Throwable {
+    try {
+      ServiceLauncher launch = launch(serviceClass, conf, args);
+      throw new AssertionError("Expected an exception with text containing " +
+          expectedText + " -but the service completed with exit code " +
+          launch.getServiceExitCode());
+    } catch (AssertionError error) {
+      throw error;
+    } catch (Throwable thrown) {
+      if (SliderUtils.isSet(expectedText) && !thrown.toString().contains(
+          expectedText)) {
+        //not the right exception -rethrow
+        LOG.warn("Caught Exception did not contain expected text" +
+                 "\"" + expectedText + "\"");
+        throw thrown;
+      }
+      return thrown;
+    }
+  }
+
+
+  /**
+   * Launch the slider client against a specific resource manager; the
+   * manager argument is appended to {@code args} if absent.
+   * @param address resource manager address
+   * @param args argument list (may be mutated by this method)
+   * @param conf configuration
+   * @return the launcher
+   */
+  public static ServiceLauncher<SliderClient> launchClientAgainstRM(
+      String address,
+      List<String> args,
+      Configuration conf) throws Throwable {
+    assertNotNull(address);
+    LOG.info("Connecting to rm at {}", address);
+    if (!args.contains(Arguments.ARG_MANAGER)) {
+      args.add(Arguments.ARG_MANAGER);
+      args.add(address);
+    }
+    ServiceLauncher<SliderClient> launcher = execSliderCommand(conf, args);
+    return launcher;
+  }
+
+  /**
+   * Add a configuration parameter as a cluster configuration option.
+   * @param extraArgs extra arguments
+   * @param conf config
+   * @param option option
+   */
+  public static void addClusterConfigOption(
+      List<String> extraArgs,
+      YarnConfiguration conf,
+      String option) {
+
+    conf.getTrimmed(option);
+    extraArgs.add(ARG_OPTION);
+    extraArgs.add(option);
+    extraArgs.add(getRequiredConfOption(conf, option));
+  }
+
+  /**
+   * Assert that a path refers to a directory.
+   * @param fs filesystem
+   * @param path path of the directory
+   * @throws IOException on File IO problems
+   */
+  public static void assertIsDirectory(FileSystem fs,
+      Path path) throws IOException {
+    FileStatus fileStatus = fs.getFileStatus(path);
+    assertIsDirectory(fileStatus);
+  }
+
+  /**
+   * Assert that a path refers to a directory.
+   * @param fileStatus stats to check
+   */
+  public static void assertIsDirectory(FileStatus fileStatus) {
+    assertTrue("Should be a dir -but isn't: " + fileStatus,
+        fileStatus.isDirectory());
+  }
+
+  /**
+   * Assert that a path exists -but make no assertions as to the
+   * type of that entry.
+   *
+   * @param fileSystem filesystem to examine
+   * @param message message to include in the assertion failure message
+   * @param path path in the filesystem
+   * @throws IOException IO problems
+   */
+  public static void assertPathExists(
+      FileSystem fileSystem,
+      String message,
+      Path path) throws IOException {
+    if (!fileSystem.exists(path)) {
+      //failure, report it
+      fail(
+          message + ": not found \"" + path + "\" in " + path.getParent() +
+          "-" +
+          ls(fileSystem, path.getParent()));
+    }
+  }
+
+  /**
+   * Assert that a path does not exist.
+   *
+   * @param fileSystem filesystem to examine
+   * @param message message to include in the assertion failure message
+   * @param path path in the filesystem
+   * @throws IOException IO problems
+   */
+  public static void assertPathDoesNotExist(
+      FileSystem fileSystem,
+      String message,
+      Path path) throws IOException {
+    try {
+      FileStatus status = fileSystem.getFileStatus(path);
+      // a status back implies there is a file here
+      fail(message + ": unexpectedly found " + path + " as  " + status);
+    } catch (FileNotFoundException expected) {
+      //this is expected
+
+    }
+  }
+
+  /**
+   * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry.
+   * @param fs filesystem
+   * @param dir directory to scan
+   * @param subdir full path to look for
+   * @throws IOException IO problems
+   */
+  public static void assertListStatusFinds(FileSystem fs,
+      Path dir,
+      Path subdir) throws IOException {
+    FileStatus[] stats = fs.listStatus(dir);
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    for (FileStatus stat : stats) {
+      builder.append(stat.toString()).append('\n');
+      if (stat.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+        + " not found in directory " + dir + ":" + builder,
+        found);
+  }
+
+  /**
+   * List a path to string.
+   * @param fileSystem filesystem
+   * @param path directory
+   * @return a listing of the filestatuses of elements in the directory, one
+   * to a line, preceded by the full path of the directory
+   * @throws IOException connectivity problems
+   */
+  public static String ls(FileSystem fileSystem, Path path)
+      throws IOException {
+    if (path == null) {
+      //surfaces when someone calls getParent() on something at the top of
+      // the path
+      return "/";
+    }
+    FileStatus[] stats;
+    String pathtext = "ls " + path;
+    try {
+      stats = fileSystem.listStatus(path);
+    } catch (FileNotFoundException e) {
+      return pathtext + " -file not found";
+    } catch (IOException e) {
+      return pathtext + " -failed: " + e;
+    }
+    return pathtext + fileStatsToString(stats, "\n");
+  }
+
+  /**
+   * Take an array of filestats and convert to a string (each entry
+   * prefixed with a [NN] counter).
+   * @param stats array of stats
+   * @param separator separator after every entry
+   * @return a stringified set
+   */
+  public static String fileStatsToString(FileStatus[] stats, String separator) {
+    StringBuilder buf = new StringBuilder(stats.length * 128);
+    for (int i = 0; i < stats.length; i++) {
+      buf.append(String.format("[%02d] %s", i, stats[i])).append(separator);
+    }
+    return buf.toString();
+  }
+
+  /**
+   * Wait, with the default 30s timeout, for the cluster to stop.
+   * @param sliderClient client bonded to the cluster
+   */
+  public static void waitWhileClusterLive(SliderClient sliderClient)
+      throws IOException, YarnException {
+    waitWhileClusterLive(sliderClient, 30000);
+  }
+
+  /**
+   * Log every (name, record) pair in a registry instance map.
+   * @param instances map of service records
+   */
+  public static void dumpRegistryInstances(
+      Map<String, ServiceRecord> instances) {
+    describe("service registry slider instances");
+    for (Entry<String, ServiceRecord> it : instances.entrySet()) {
+      LOG.info(" {} : {}", it.getKey(), it.getValue());
+    }
+    describe("end list service registry slider instances");
+  }
+
+
+  public static void dumpRegistryInstanceIDs(List<String> instanceIds) {
+    describe("service registry instance IDs");
+    dumpCollection(instanceIds);
+  }
+
+  public static void dumpRegistryServiceTypes(Collection<String> entries) {
+    describe("service registry types");
+    dumpCollection(entries);
+  }
+
+  /**
+   * Log the size of a collection followed by every entry in it.
+   * @param entries entries to log
+   */
+  public static <V> void dumpCollection(Collection<V> entries) {
+    LOG.info("number of entries: {}", entries.size());
+    for (V it : entries) {
+      LOG.info(it.toString());
+    }
+  }
+
+  /**
+   * Log the length of an array followed by every entry in it.
+   * @param entries entries to log
+   */
+  public static void dumpArray(Object[] entries) {
+    LOG.info("number of entries: {}", entries.length);
+    for (Object it : entries) {
+      LOG.info(it.toString());
+    }
+  }
+
+  /**
+   * Log every key/value pair of a map, quoted.
+   * @param map map to log
+   */
+  public static <K, V> void dumpMap(Map<K, V> map) {
+    for (Entry<K, V> it : map.entrySet()) {
+      LOG.info("\"{}\": \"{}\"", it.getKey().toString(), it.getValue()
+          .toString());
+    }
+  }
+
+  /**
+   * Get a time option in seconds if set, otherwise the default value (also
+   * in seconds).
+   * This operation picks up the time value as a system property if set -that
+   * value overrides anything in the test file
+   * @param conf
+   * @param key
+   * @param defValMillis
+   * @return
+   */
+  public static int getTimeOptionMillis(
+      Configuration conf,
+      String key,
+      int defValMillis) {
+    int val = conf.getInt(key, 0);
+    val = Integer.getInteger(key, val);
+    int time = 1000 * val;
+    if (time == 0) {
+      time = defValMillis;
+    }
+    return time;
+  }
+
+  /**
+   * Log every configuration in a published configuration set as
+   * {@code key -- description}.
+   * @param confSet configuration set to log
+   */
+  public void dumpConfigurationSet(PublishedConfigSet confSet) {
+    for (String key : confSet.keys()) {
+      PublishedConfiguration config = confSet.get(key);
+      LOG.info("{} -- {}", key, config.description);
+    }
+  }
+
+  /**
+   * Convert a file to a URI suitable for use in an argument.
+   * The file is made absolute first, so relative paths resolve against
+   * the current working directory.
+   * @param file file
+   * @return a URI string valid on all platforms
+   */
+  public String toURIArg(File file) {
+    return file.getAbsoluteFile().toURI().toString();
+  }
+
+  /**
+   * Assert a file exists; fails with a listing of the parent dir.
+   * @param text text for front of message
+   * @param file file to look for
+   * @throws FileNotFoundException if the file does not exist
+   */
+  public void assertFileExists(String text, File file)
+      throws FileNotFoundException {
+    if (!file.exists()) {
+      File parent = file.getParentFile();
+      // parent may be null (path has no parent) and list() may return null
+      // (parent missing or unreadable); guard both so the real failure is
+      // not masked by an NPE
+      String[] files = parent == null ? null : parent.list();
+      StringBuilder builder = new StringBuilder();
+      if (parent != null) {
+        builder.append(parent.getAbsolutePath());
+      }
+      builder.append(":\n");
+      if (files != null) {
+        for (String name : files) {
+          builder.append("  ");
+          builder.append(name);
+          builder.append("\n");
+        }
+      }
+      throw new FileNotFoundException(text + ": " + file + " not found in " +
+          builder);
+    }
+  }
+
+  /**
+   * Repeat a probe until it succeeds; if it does not succeed, execute the
+   * failure closure (when supplied) and optionally raise an exception with
+   * the supplied message.
+   * @param action name of the probe, for logging
+   * @param probe probe
+   * @param timeout time in millis before giving up
+   * @param sleepDur sleep between failing attempts, in millis
+   * @param args map of arguments to the probe
+   * @param failIfUnsuccessful if the probe fails after all the attempts,
+   * should it raise an exception
+   * @param failureMessage message to include in exception raised
+   * @param failureHandler closure to invoke prior to the failure being raised
+   */
+  protected void repeatUntilSuccess(
+      String action,
+      Probe probe,
+      int timeout,
+      int sleepDur,
+      Map args,
+      boolean failIfUnsuccessful,
+      String failureMessage,
+      Action failureHandler) throws Exception {
+    LOG.debug("Probe {} timelimit {}", action, timeout);
+    if (timeout < 1000) {
+      fail("Timeout " + timeout + " too low: milliseconds are expected, not " +
+          "seconds");
+    }
+    int attemptCount = 1;
+    boolean succeeded = false;
+    boolean completed = false;
+    Duration duration = new Duration(timeout);
+    duration.start();
+    while (!completed) {
+      Outcome outcome = probe.invoke(args);
+      if (outcome.equals(Outcome.SUCCESS)) {
+        // success
+        LOG.debug("Success after {} attempt(s)", attemptCount);
+        succeeded = true;
+        completed = true;
+      } else if (outcome.equals(Outcome.RETRY)) {
+        // failed but retry possible; log the attempt that just failed
+        // *before* incrementing the counter
+        LOG.debug("Attempt {} failed", attemptCount);
+        attemptCount++;
+        completed = duration.getLimitExceeded();
+        if (!completed) {
+          try {
+            Thread.sleep(sleepDur);
+          } catch (InterruptedException e) {
+            // preserve the interrupt status and stop probing instead of
+            // spinning until the timeout
+            Thread.currentThread().interrupt();
+            completed = true;
+          }
+        }
+      } else if (outcome.equals(Outcome.FAIL)) {
+        // fast fail
+        LOG.debug("Fast fail of probe");
+        completed = true;
+      }
+    }
+    if (!succeeded) {
+      if (duration.getLimitExceeded()) {
+        LOG.info("probe timed out after {} and {} attempts", timeout,
+            attemptCount);
+      }
+      if (failureHandler != null) {
+        failureHandler.invoke();
+      }
+      if (failIfUnsuccessful) {
+        fail(failureMessage);
+      }
+    }
+  }
+
+  /**
+   * Get a value from a map; raise an assertion if it is not there.
+   * @param map map to look up
+   * @param key key
+   * @return the string value
+   */
+  public <K, V> String requiredMapValue(Map<K, V> map, String key) {
+    assertNotNull(map.get(key));
+    return map.get(key).toString();
+  }
+
+  /**
+   * Assert that {@code text} contains {@code expected}; on failure the
+   * message is logged before the assertion is raised.
+   * @param expected substring to look for
+   * @param text text to search; must not be null
+   */
+  public static void assertStringContains(String expected, String text) {
+    assertNotNull("null text", text);
+    if (!text.contains(expected)) {
+      String message = String.format("did not find %s in \"%s\"", expected,
+          text);
+      LOG.error(message);
+      fail(message);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestAssertions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestAssertions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestAssertions.java
new file mode 100644
index 0000000..9806ac3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestAssertions.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.slider.api.resource.Application;
+import org.junit.Test;
+
+import java.util.Collections;
+
+/**
+ * Test for some of the command test base operations.
+ */
+public class TestAssertions {
+
+  // resource path used by the (currently disabled) metrics retrieval test
+  public static final String CLUSTER_JSON = "json/cluster.json";
+
+  // a null container list must count as zero live containers
+  @Test
+  public void testNoInstances() throws Throwable {
+    Application application = new Application();
+    application.setContainers(null);
+    SliderTestUtils.assertContainersLive(application, "example", 0);
+  }
+
+  // an empty container list must count as zero live containers
+  @Test
+  public void testEmptyInstances() throws Throwable {
+    Application application = new Application();
+    application.setContainers(Collections.emptyList());
+    SliderTestUtils.assertContainersLive(application, "example", 0);
+  }
+
+// TODO test metrics retrieval
+//  @Test
+//  public void testLiveInstances() throws Throwable {
+//    InputStream stream = getClass().getClassLoader().getResourceAsStream(
+//        CLUSTER_JSON);
+//    assertNotNull("could not load " + CLUSTER_JSON, stream);
+//    ClusterDescription liveCD = ClusterDescription.fromStream(stream);
+//    assertNotNull(liveCD);
+//    SliderTestUtils.assertContainersLive(liveCD, "SLEEP_LONG", 4);
+//    assertEquals((Integer) 1, liveCD.statistics.get("SLEEP_LONG").get(
+//        StatusKeys.STATISTICS_CONTAINERS_ANTI_AFFINE_PENDING));
+//  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestUtility.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestUtility.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestUtility.java
new file mode 100644
index 0000000..5493198
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/TestUtility.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.utils;
+
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
+import org.junit.Assert;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+/**
+ *  Various utility methods.
+ *  Byte comparison methods are from
+ *  <code>org.apache.hadoop.fs.contract.ContractTestUtils</code>
+ */
+public class TestUtility {
+  protected static final Logger log =
+      LoggerFactory.getLogger(TestUtility.class);
+
+  /**
+   * Recursively add the contents of a directory to a zip stream.
+   * @param dirObj directory whose contents are added
+   * @param zipFile open zip stream to write entries to
+   * @param prefix entry-name prefix ("" for the top level)
+   * @throws IOException if the directory cannot be listed or a file copied
+   */
+  public static void addDir(File dirObj, ZipArchiveOutputStream zipFile,
+      String prefix) throws IOException {
+    File[] files = dirObj.listFiles();
+    if (files == null) {
+      // File.listFiles() returns null when the path is not a readable
+      // directory; fail meaningfully rather than with an NPE
+      throw new IOException("Cannot list directory " + dirObj);
+    }
+    for (File file : files) {
+      if (file.isDirectory()) {
+        addDir(file, zipFile, prefix + file.getName() + File.separator);
+      } else {
+        log.info("Adding to zip - " + prefix + file.getName());
+        zipFile.putArchiveEntry(new ZipArchiveEntry(prefix + file.getName()));
+        // try-with-resources: the original code leaked this input stream
+        try (FileInputStream in = new FileInputStream(file)) {
+          IOUtils.copy(in, zipFile);
+        }
+        zipFile.closeArchiveEntry();
+      }
+    }
+  }
+
+  /**
+   * Zip a directory tree into the named file.
+   * @param zipFile path of the zip file to create
+   * @param dir directory to archive
+   * @throws IOException on any IO failure
+   */
+  public static void zipDir(String zipFile, String dir) throws IOException {
+    File dirObj = new File(dir);
+    log.info("Creating : {}", zipFile);
+    // try-with-resources closes the stream even if addDir throws
+    try (ZipArchiveOutputStream out =
+             new ZipArchiveOutputStream(new FileOutputStream(zipFile))) {
+      addDir(dirObj, out, "");
+    }
+  }
+
+  /**
+   * Create an application package (zip) from a source directory, placing it
+   * in a subdirectory of a JUnit temporary folder.
+   * @param folder temporary folder rule supplying the working directory
+   * @param subDir subdirectory to create under the temporary folder
+   * @param pkgName file name of the package to create
+   * @param srcPath directory whose contents are zipped
+   * @return the absolute path of the created zip file
+   * @throws IOException on any IO failure
+   */
+  public static String createAppPackage(
+      TemporaryFolder folder, String subDir, String pkgName, String srcPath)
+      throws IOException {
+    File pkgPath = folder.newFolder(subDir);
+    File zipFile = new File(pkgPath, pkgName).getAbsoluteFile();
+    String zipFileName = zipFile.getAbsolutePath();
+    zipDir(zipFileName, srcPath);
+    log.info("Created temporary zip file at {}", zipFileName);
+    return zipFileName;
+  }
+
+  /**
+   * Assert that the arrays original[0..len) and received[] are equal.
+   * A failure triggers the logging of the bytes near where the first
+   * difference surfaces.
+   * @param original source data
+   * @param received actual
+   * @param len length of bytes to compare
+   */
+  public static void compareByteArrays(byte[] original,
+      byte[] received,
+      int len) {
+    Assert.assertEquals("Number of bytes read != number written",
+        len, received.length);
+    int errors = 0;
+    int firstErrorByte = -1;
+    for (int i = 0; i < len; i++) {
+      if (original[i] != received[i]) {
+        if (errors == 0) {
+          firstErrorByte = i;
+        }
+        errors++;
+      }
+    }
+
+    if (errors > 0) {
+      String message = String.format(" %d errors in file of length %d",
+          errors, len);
+      log.warn(message);
+      // the range either side of the first error to print
+      // this is a purely arbitrary number, to aid user debugging
+      final int overlap = 10;
+      for (int i = Math.max(0, firstErrorByte - overlap);
+           i < Math.min(firstErrorByte + overlap, len);
+           i++) {
+        byte actual = received[i];
+        byte expected = original[i];
+        String letter = toChar(actual);
+        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+        if (expected != actual) {
+          line = String.format("[%04d] %2x %s -expected %2x %s\n",
+              i,
+              actual,
+              letter,
+              expected,
+              toChar(expected));
+        }
+        log.warn(line);
+      }
+      Assert.fail(message);
+    }
+  }
+
+  /**
+   * Convert a byte to a character for printing. If the
+   * byte value is &lt; 32 -and hence unprintable- the byte is
+   * returned as a two digit hex value
+   * @param b byte
+   * @return the printable character string
+   */
+  public static String toChar(byte b) {
+    if (b >= 0x20) {
+      return Character.toString((char) b);
+    } else {
+      return String.format("%02x", b);
+    }
+  }
+
+  /**
+   * Convert a buffer to a string, character by character.
+   * @param buffer input bytes
+   * @return a string conversion
+   */
+  public static String toChar(byte[] buffer) {
+    StringBuilder builder = new StringBuilder(buffer.length);
+    for (byte b : buffer) {
+      builder.append(toChar(b));
+    }
+    return builder.toString();
+  }
+
+  /**
+   * Convert a string to a byte array by truncating each char to its low
+   * eight bits (i.e. assumes ASCII/Latin-1 content).
+   * @param s source string
+   * @return the byte array
+   */
+  public static byte[] toAsciiByteArray(String s) {
+    char[] chars = s.toCharArray();
+    int len = chars.length;
+    byte[] buffer = new byte[len];
+    for (int i = 0; i < len; i++) {
+      buffer[i] = (byte) (chars[i] & 0xff);
+    }
+    return buffer;
+  }
+
+  /**
+   * Create a dataset for use in the tests; all data is in the range
+   * base to (base+modulo-1) inclusive
+   * @param len length of data
+   * @param base base of the data
+   * @param modulo the modulo
+   * @return the newly generated dataset
+   */
+  public static byte[] dataset(int len, int base, int modulo) {
+    byte[] dataset = new byte[len];
+    for (int i = 0; i < len; i++) {
+      dataset[i] = (byte) (base + (i % modulo));
+    }
+    return dataset;
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[02/50] [abbrv] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
new file mode 100644
index 0000000..c5dc627
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Date;
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * An instance of a running application container.
+ * Note: equality and the hash code are based solely on the container
+ * {@code id}; all other fields are ignored in comparisons.
+ **/
+
+@ApiModel(description = "An instance of a running application container")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Container extends BaseResource {
+  private static final long serialVersionUID = -8955788064529288L;
+
+  private String id = null;
+  private Date launchTime = null;
+  private String ip = null;
+  private String hostname = null;
+  private String bareHost = null;
+  private ContainerState state = null;
+  private String componentName = null;
+  private Resource resource = null;
+  private Artifact artifact = null;
+  private Boolean privilegedContainer = null;
+
+  /**
+   * Unique container id of a running application, e.g.
+   * container_e3751_1458061340047_0008_01_000002.
+   **/
+  public Container id(String id) {
+    this.id = id;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Unique container id of a running application, e.g. container_e3751_1458061340047_0008_01_000002.")
+  @JsonProperty("id")
+  public String getId() {
+    return id;
+  }
+
+  public void setId(String id) {
+    this.id = id;
+  }
+
+  /**
+   * The time when the container was created, e.g. 2016-03-16T01:01:49.000Z.
+   * This will most likely be different from cluster launch time.
+   * Note: {@link Date} is mutable, so the value is defensively cloned on
+   * both read and write.
+   **/
+  public Container launchTime(Date launchTime) {
+    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.")
+  @JsonProperty("launch_time")
+  public Date getLaunchTime() {
+    return launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  @XmlElement(name = "launch_time")
+  public void setLaunchTime(Date launchTime) {
+    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  /**
+   * IP address of a running container, e.g. 172.31.42.141. The IP address and
+   * hostname attribute values are dependent on the cluster/docker network setup
+   * as per YARN-4007.
+   **/
+  public Container ip(String ip) {
+    this.ip = ip;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
+  @JsonProperty("ip")
+  public String getIp() {
+    return ip;
+  }
+
+  public void setIp(String ip) {
+    this.ip = ip;
+  }
+
+  /**
+   * Fully qualified hostname of a running container, e.g.
+   * ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and
+   * hostname attribute values are dependent on the cluster/docker network setup
+   * as per YARN-4007.
+   **/
+  public Container hostname(String hostname) {
+    this.hostname = hostname;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
+  @JsonProperty("hostname")
+  public String getHostname() {
+    return hostname;
+  }
+
+  public void setHostname(String hostname) {
+    this.hostname = hostname;
+  }
+
+  /**
+   * The bare node or host in which the container is running, e.g.
+   * cn008.example.com.
+   **/
+  public Container bareHost(String bareHost) {
+    this.bareHost = bareHost;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The bare node or host in which the container is running, e.g. cn008.example.com.")
+  @JsonProperty("bare_host")
+  public String getBareHost() {
+    return bareHost;
+  }
+
+  @XmlElement(name = "bare_host")
+  public void setBareHost(String bareHost) {
+    this.bareHost = bareHost;
+  }
+
+  /**
+   * State of the container of an application.
+   **/
+  public Container state(ContainerState state) {
+    this.state = state;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "State of the container of an application.")
+  @JsonProperty("state")
+  public ContainerState getState() {
+    return state;
+  }
+
+  public void setState(ContainerState state) {
+    this.state = state;
+  }
+
+  /**
+   * Name of the component that this container instance belongs to.
+   **/
+  public Container componentName(String componentName) {
+    this.componentName = componentName;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.")
+  @JsonProperty("component_name")
+  public String getComponentName() {
+    return componentName;
+  }
+
+  @XmlElement(name = "component_name")
+  public void setComponentName(String componentName) {
+    this.componentName = componentName;
+  }
+
+  /**
+   * Resource used for this container.
+   **/
+  public Container resource(Resource resource) {
+    this.resource = resource;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Resource used for this container.")
+  @JsonProperty("resource")
+  public Resource getResource() {
+    return resource;
+  }
+
+  public void setResource(Resource resource) {
+    this.resource = resource;
+  }
+
+  /**
+   * Artifact used for this container.
+   **/
+  public Container artifact(Artifact artifact) {
+    this.artifact = artifact;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact used for this container.")
+  @JsonProperty("artifact")
+  public Artifact getArtifact() {
+    return artifact;
+  }
+
+  public void setArtifact(Artifact artifact) {
+    this.artifact = artifact;
+  }
+
+  /**
+   * Container running in privileged mode or not.
+   **/
+  public Container privilegedContainer(Boolean privilegedContainer) {
+    this.privilegedContainer = privilegedContainer;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Container running in privileged mode or not.")
+  @JsonProperty("privileged_container")
+  public Boolean getPrivilegedContainer() {
+    return privilegedContainer;
+  }
+
+  public void setPrivilegedContainer(Boolean privilegedContainer) {
+    this.privilegedContainer = privilegedContainer;
+  }
+
+  // equality is by container id only; see the class javadoc
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Container container = (Container) o;
+    return Objects.equals(this.id, container.id);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(id);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Container {\n");
+
+    sb.append("    id: ").append(toIndentedString(id)).append("\n");
+    sb.append("    launchTime: ").append(toIndentedString(launchTime))
+        .append("\n");
+    sb.append("    ip: ").append(toIndentedString(ip)).append("\n");
+    sb.append("    hostname: ").append(toIndentedString(hostname)).append("\n");
+    sb.append("    bareHost: ").append(toIndentedString(bareHost)).append("\n");
+    sb.append("    state: ").append(toIndentedString(state)).append("\n");
+    sb.append("    componentName: ").append(toIndentedString(componentName))
+        .append("\n");
+    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
+    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
+    sb.append("    privilegedContainer: ")
+        .append(toIndentedString(privilegedContainer)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ContainerState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ContainerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ContainerState.java
new file mode 100644
index 0000000..e79f6e0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ContainerState.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+/**
+ * The current state of the container of an application.
+ * NOTE(review): only INIT and READY are modelled; the transition semantics
+ * are defined by the state-handling code, not visible in this file.
+ **/
+public enum ContainerState {
+  INIT, READY;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Error.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Error.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Error.java
new file mode 100644
index 0000000..3cf9b29
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Error.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * A simple error payload of code, message and fields.
+ * Note: this class name shadows {@code java.lang.Error}; refer to the JDK
+ * class by its fully qualified name within this package.
+ */
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+public class Error {
+
+  private Integer code = null;
+  private String message = null;
+  private String fields = null;
+
+  /**
+   * Fluent setter for the error code.
+   **/
+  public Error code(Integer code) {
+    this.code = code;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "")
+  @JsonProperty("code")
+  public Integer getCode() {
+    return code;
+  }
+
+  public void setCode(Integer code) {
+    this.code = code;
+  }
+
+  /**
+   * Fluent setter for the error message.
+   **/
+  public Error message(String message) {
+    this.message = message;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "")
+  @JsonProperty("message")
+  public String getMessage() {
+    return message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  /**
+   * Fluent setter for the fields detail.
+   **/
+  public Error fields(String fields) {
+    this.fields = fields;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "")
+  @JsonProperty("fields")
+  public String getFields() {
+    return fields;
+  }
+
+  public void setFields(String fields) {
+    this.fields = fields;
+  }
+
+  // equality compares all three attributes
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Error error = (Error) o;
+    return Objects.equals(this.code, error.code)
+        && Objects.equals(this.message, error.message)
+        && Objects.equals(this.fields, error.fields);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(code, message, fields);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Error {\n");
+
+    sb.append("    code: ").append(toIndentedString(code)).append("\n");
+    sb.append("    message: ").append(toIndentedString(message)).append("\n");
+    sb.append("    fields: ").append(toIndentedString(fields)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/PlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/PlacementPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/PlacementPolicy.java
new file mode 100644
index 0000000..306338f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/PlacementPolicy.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Placement policy of an instance of an application. This feature is in the
+ * works in YARN-4902.
+ **/
+
+@ApiModel(description = "Placement policy of an instance of an application. This feature is in the works in YARN-4902.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+public class PlacementPolicy implements Serializable {
+  private static final long serialVersionUID = 4341110649551172231L;
+
+  private String label = null;
+
+  /**
+   * Assigns an app to a named partition of the cluster where the application
+   * desires to run (optional). If not specified all apps are submitted to a
+   * default label of the app owner. One or more labels can be setup for each
+   * application owner account with required constraints like no-preemption,
+   * sla-99999, preemption-ok, etc.
+   **/
+  public PlacementPolicy label(String label) {
+    this.label = label;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Assigns an app to a named partition of the cluster where the application desires to run (optional). If not specified all apps are submitted to a default label of the app owner. One or more labels can be setup for each application owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.")
+  @JsonProperty("label")
+  public String getLabel() {
+    return label;
+  }
+
+  public void setLabel(String label) {
+    this.label = label;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    PlacementPolicy placementPolicy = (PlacementPolicy) o;
+    return Objects.equals(this.label, placementPolicy.label);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(label);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class PlacementPolicy {\n");
+
+    sb.append("    label: ").append(toIndentedString(label)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
new file mode 100644
index 0000000..00bf29c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ReadinessCheck.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+/**
+ * A custom command or a pluggable helper container to determine the readiness
+ * of a container of a component. Readiness for every application is different.
+ * Hence the need for a simple interface, with scope to support advanced
+ * usecases.
+ **/
+
+@ApiModel(description = "A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every application is different. Hence the need for a simple interface, with scope to support advanced usecases.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+public class ReadinessCheck implements Serializable {
+  private static final long serialVersionUID = -3836839816887186801L;
+
+  public enum TypeEnum {
+    HTTP("HTTP");
+
+    private String value;
+
+    TypeEnum(String value) {
+      this.value = value;
+    }
+
+    @Override
+    @JsonValue
+    public String toString() {
+      return value;
+    }
+  }
+
+  private TypeEnum type = null;
+  private String uri = null;
+  private Artifact artifact = null;
+
+  /**
+   * E.g. HTTP (YARN will perform a simple REST call at a regular interval and
+   * expect a 204 No content).
+   **/
+  public ReadinessCheck type(TypeEnum type) {
+    this.type = type;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).")
+  @JsonProperty("type")
+  public TypeEnum getType() {
+    return type;
+  }
+
+  public void setType(TypeEnum type) {
+    this.type = type;
+  }
+
+  /**
+   * Fully qualified REST uri endpoint.
+   **/
+  public ReadinessCheck uri(String uri) {
+    this.uri = uri;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "Fully qualified REST uri endpoint.")
+  @JsonProperty("uri")
+  public String getUri() {
+    return uri;
+  }
+
+  public void setUri(String uri) {
+    this.uri = uri;
+  }
+
+  /**
+   * Artifact of the pluggable readiness check helper container (optional). If
+   * specified, this helper container typically hosts the http uri and
+   * encapsulates the complex scripts required to perform actual container
+   * readiness check. At the end it is expected to respond a 204 No content just
+   * like the simplified use case. This pluggable framework benefits application
+   * owners who can run applications without any packaging modifications. Note,
+   * artifacts of type docker only is supported for now.
+   **/
+  public ReadinessCheck artifact(Artifact artifact) {
+    this.artifact = artifact;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits application owners who can run applications without any packaging modifications. Note, artifacts of type docker only is supported for now.")
+  @JsonProperty("artifact")
+  public Artifact getArtifact() {
+    return artifact;
+  }
+
+  public void setArtifact(Artifact artifact) {
+    this.artifact = artifact;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ReadinessCheck readinessCheck = (ReadinessCheck) o;
+    return Objects.equals(this.type, readinessCheck.type)
+        && Objects.equals(this.uri, readinessCheck.uri)
+        && Objects.equals(this.artifact, readinessCheck.artifact);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(type, uri, artifact);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class ReadinessCheck {\n");
+
+    sb.append("    type: ").append(toIndentedString(type)).append("\n");
+    sb.append("    uri: ").append(toIndentedString(uri)).append("\n");
+    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e56c2281/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
new file mode 100644
index 0000000..190121d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Resource.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Resource determines the amount of resources (vcores, memory, network, etc.)
+ * usable by a container. This field determines the resource to be applied for
+ * all the containers of a component or application. The resource specified at
+ * the app (or global) level can be overriden at the component level. Only one
+ * of profile OR cpu &amp; memory are exepected. It raises a validation
+ * exception otherwise.
+ **/
+
+@ApiModel(description = "Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or application. The resource specified at the app (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are exepected. It raises a validation exception otherwise.")
+@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
+public class Resource extends BaseResource implements Cloneable {
+  private static final long serialVersionUID = -6431667797380250037L;
+
+  private String profile = null;
+  private Integer cpus = null;
+  private String memory = null;
+
+  /**
+   * Each resource profile has a unique id which is associated with a
+   * cluster-level predefined memory, cpus, etc.
+   **/
+  public Resource profile(String profile) {
+    this.profile = profile;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.")
+  @JsonProperty("profile")
+  public String getProfile() {
+    return profile;
+  }
+
+  public void setProfile(String profile) {
+    this.profile = profile;
+  }
+
+  /**
+   * Amount of vcores allocated to each container (optional but overrides cpus
+   * in profile if specified).
+   **/
+  public Resource cpus(Integer cpus) {
+    this.cpus = cpus;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).")
+  @JsonProperty("cpus")
+  public Integer getCpus() {
+    return cpus;
+  }
+
+  public void setCpus(Integer cpus) {
+    this.cpus = cpus;
+  }
+
+  /**
+   * Amount of memory allocated to each container (optional but overrides memory
+   * in profile if specified). Currently accepts only an integer value and
+   * default unit is in MB.
+   **/
+  public Resource memory(String memory) {
+    this.memory = memory;
+    return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.")
+  @JsonProperty("memory")
+  public String getMemory() {
+    return memory;
+  }
+
+  public void setMemory(String memory) {
+    this.memory = memory;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    Resource resource = (Resource) o;
+    return Objects.equals(this.profile, resource.profile)
+        && Objects.equals(this.cpus, resource.cpus)
+        && Objects.equals(this.memory, resource.memory);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(profile, cpus, memory);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class Resource {\n");
+
+    sb.append("    profile: ").append(toIndentedString(profile)).append("\n");
+    sb.append("    cpus: ").append(toIndentedString(cpus)).append("\n");
+    sb.append("    memory: ").append(toIndentedString(memory)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+
+  @Override
+  public Object clone() throws CloneNotSupportedException {
+    return super.clone();
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java
new file mode 100644
index 0000000..01da470
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Mock app id.
+ */
+public class MockApplicationId extends ApplicationId {
+
+  private int id;
+  private long clusterTimestamp;
+
+  public MockApplicationId() {
+  }
+
+  public MockApplicationId(int id) {
+    this.id = id;
+  }
+
+  public MockApplicationId(int id, long clusterTimestamp) {
+    this.id = id;
+    this.clusterTimestamp = clusterTimestamp;
+  }
+
+  @Override
+  public int getId() {
+    return id;
+  }
+
+  @Override
+  public void setId(int id) {
+    this.id = id;
+  }
+
+  @Override
+  public long getClusterTimestamp() {
+    return clusterTimestamp;
+  }
+
+  @Override
+  public void setClusterTimestamp(long clusterTimestamp) {
+    this.clusterTimestamp = clusterTimestamp;
+  }
+
+  @Override
+  public void build() {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java
new file mode 100644
index 0000000..2578595
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.slider.server.appmaster.state.AbstractClusterServices;
+
+/**
+ * Mock cluster services.
+ */
+public class MockClusterServices extends AbstractClusterServices {
+
+  @Override
+  public Resource newResource() {
+    return new MockResource(0, 0);
+  }
+
+  @Override
+  public Resource newResource(int memory, int cores) {
+    return new MockResource(memory, cores);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java
new file mode 100644
index 0000000..148b7f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.Token;
+
+/**
+ * Mock container.
+ */
+public class MockContainer extends Container {
+
+  private ContainerId id;
+  private NodeId nodeId;
+  private String nodeHttpAddress;
+  private Resource resource;
+  private Priority priority;
+  private Token containerToken;
+
+  @Override
+  public int compareTo(Container other) {
+    if (this.getId().compareTo(other.getId()) == 0) {
+      if (this.getNodeId().compareTo(other.getNodeId()) == 0) {
+        return this.getResource().compareTo(other.getResource());
+      } else {
+        return this.getNodeId().compareTo(other.getNodeId());
+      }
+    } else {
+      return this.getId().compareTo(other.getId());
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "MockContainer{ id=" + id +
+           ", nodeHttpAddress='" + nodeHttpAddress + "'," +
+           " priority=" + priority + " }";
+  }
+
+  @Override
+  public ContainerId getId() {
+    return id;
+  }
+
+  @Override
+  public void setId(ContainerId id) {
+    this.id = id;
+  }
+
+  @Override
+  public NodeId getNodeId() {
+    return nodeId;
+  }
+
+  @Override
+  public void setNodeId(NodeId nodeId) {
+    this.nodeId = nodeId;
+  }
+
+  @Override
+  public String getNodeHttpAddress() {
+    return nodeHttpAddress;
+  }
+
+  @Override
+  public void setNodeHttpAddress(String nodeHttpAddress) {
+    this.nodeHttpAddress = nodeHttpAddress;
+  }
+
+  @Override
+  public Resource getResource() {
+    return resource;
+  }
+
+  @Override
+  public void setResource(Resource resource) {
+    this.resource = resource;
+  }
+
+  @Override
+  public Priority getPriority() {
+    return priority;
+  }
+
+  @Override
+  public void setPriority(Priority priority) {
+    this.priority = priority;
+  }
+
+  @Override
+  public Token getContainerToken() {
+    return containerToken;
+  }
+
+  @Override
+  public void setContainerToken(Token containerToken) {
+    this.containerToken = containerToken;
+  }
+
+  @Override
+  public ExecutionType getExecutionType() {
+    return null;
+  }
+
+  @Override
+  public void setExecutionType(ExecutionType executionType) {
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java
new file mode 100644
index 0000000..3cbc7e5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+/**
+ * Mock container id.
+ */
+public class MockContainerId extends ContainerId implements Cloneable {
+
+  private static final MockApplicationAttemptId DEFAULT_APP_ATTEMPT_ID =
+      new MockApplicationAttemptId(new MockApplicationId(1), 1);
+
+  private long containerId;
+  private ApplicationAttemptId applicationAttemptId;
+
+  MockContainerId() {
+  }
+
+  /**
+   * Sets up a default app Attempt ID.
+   * @param containerId
+   */
+  MockContainerId(long containerId) {
+    this.containerId = containerId;
+    this.applicationAttemptId = DEFAULT_APP_ATTEMPT_ID;
+  }
+
+  public MockContainerId(ApplicationAttemptId applicationAttemptId,
+      long containerId) {
+    this.containerId = containerId;
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  MockContainerId(ContainerId that) {
+    containerId = that.getContainerId();
+    applicationAttemptId = that.getApplicationAttemptId();
+  }
+
+  @Deprecated
+  @Override
+  public int getId() {
+    return (int) containerId;
+  }
+
+  // TODO: Temporarily adding it back
+  void setId(int id) {
+    containerId = (long) id;
+  }
+
+  @Override
+  public long getContainerId() {
+    return this.containerId;
+  }
+
+  @Override
+  public void setContainerId(long id) {
+    this.containerId = id;
+  }
+
+  @Override
+  public ApplicationAttemptId getApplicationAttemptId() {
+    return applicationAttemptId;
+  }
+
+  @Override
+  public void setApplicationAttemptId(ApplicationAttemptId
+      applicationAttemptId) {
+    this.applicationAttemptId = applicationAttemptId;
+  }
+
+  @Override
+  public void build() {
+
+  }
+
+  @Override
+  public String toString() {
+    return "mockcontainer_" + containerId;
+  }
+
+  @Override
+  protected Object clone() throws CloneNotSupportedException {
+    return super.clone();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
new file mode 100644
index 0000000..2ac5087
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java
@@ -0,0 +1,270 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import static org.apache.slider.api.ResourceKeys.COMPONENT_PLACEMENT_POLICY;
+
+/**
+ * Factory for creating mock YARN/Slider objects used by the app master
+ * model tests. ID-generating methods use monotonically increasing
+ * counters, so each call returns a distinct ID.
+ */
+public class MockFactory implements MockRoles {
+
+  /** Node failure threshold applied to every mock role. */
+  public static final int NODE_FAILURE_THRESHOLD = 2;
+
+  public static final MockFactory INSTANCE = new MockFactory();
+
+  /**
+   * Basic role.
+   */
+  public static final ProviderRole PROVIDER_ROLE0 = new ProviderRole(
+      MockRoles.ROLE0,
+      0,
+      PlacementPolicy.DEFAULT,
+      NODE_FAILURE_THRESHOLD,
+      1,
+      ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+
+  /**
+   * Role 1 is strict. Timeout should be irrelevant; same as failures.
+   */
+  public static final ProviderRole PROVIDER_ROLE1 = new ProviderRole(
+      MockRoles.ROLE1,
+      1,
+      PlacementPolicy.STRICT,
+      NODE_FAILURE_THRESHOLD,
+      1,
+      ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+
+  /**
+   * Role 2: longer delay.
+   */
+  public static final ProviderRole PROVIDER_ROLE2 = new ProviderRole(
+      MockRoles.ROLE2,
+      2,
+      PlacementPolicy.ANYWHERE,
+      NODE_FAILURE_THRESHOLD,
+      2,
+      ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+
+  /**
+   * A "role2" role patched to have anti-affinity set.
+   */
+  public static final ProviderRole AAROLE_2 = new ProviderRole(
+      MockRoles.ROLE2,
+      2,
+      PlacementPolicy.ANTI_AFFINITY_REQUIRED,
+      NODE_FAILURE_THRESHOLD,
+      2,
+      null);
+
+  /**
+   * A "role1" role patched to have anti-affinity set and GPU as the label.
+   */
+  public static final ProviderRole AAROLE_1_GPU = new ProviderRole(
+      MockRoles.ROLE1,
+      1,
+      PlacementPolicy.ANTI_AFFINITY_REQUIRED,
+      NODE_FAILURE_THRESHOLD,
+      1,
+      MockRoles.LABEL_GPU);
+
+  // counters backing the generated application/attempt/container IDs
+  private int appIdCount;
+  private int attemptIdCount;
+  private int containerIdCount;
+
+  private ApplicationId appId = newAppId();
+  private ApplicationAttemptId attemptId = newApplicationAttemptId(appId);
+
+  /**
+   * List of roles.
+   */
+  public static final List<ProviderRole> ROLES = Arrays.asList(
+          PROVIDER_ROLE0,
+          PROVIDER_ROLE1,
+          PROVIDER_ROLE2
+      );
+
+  public static final int ROLE_COUNT = ROLES.size();
+
+  /** @return a new container ID under this factory's own attempt ID */
+  MockContainerId newContainerId() {
+    return newContainerId(attemptId);
+  }
+
+  /**
+   * Create a container ID with a unique, increasing container number.
+   * @param attemptId0 attempt to create the container under
+   * @return the new container ID
+   */
+  MockContainerId newContainerId(ApplicationAttemptId attemptId0) {
+    return new MockContainerId(attemptId0, containerIdCount++);
+  }
+
+  /**
+   * Create an attempt ID with a unique, increasing attempt number.
+   * @param appId0 application to create the attempt under
+   * @return the new attempt ID
+   */
+  MockApplicationAttemptId newApplicationAttemptId(ApplicationId appId0) {
+    return new MockApplicationAttemptId(appId0, attemptIdCount++);
+  }
+
+  /** @return an application ID with a unique, increasing ID number */
+  MockApplicationId newAppId() {
+    MockApplicationId id = new MockApplicationId();
+    id.setId(appIdCount++);
+    return id;
+  }
+
+  public MockNodeId newNodeId(String host) {
+    return new MockNodeId(host);
+  }
+
+  MockContainer newContainer(ContainerId cid) {
+    MockContainer c = new MockContainer();
+    c.setId(cid);
+    return c;
+  }
+
+  /** @return a container with a freshly generated container ID */
+  public MockContainer newContainer() {
+    return newContainer(newContainerId());
+  }
+
+  /**
+   * Create a container with a fresh ID, bound to a node and priority.
+   * @param nodeId node to assign to
+   * @param priority container priority
+   * @return the container
+   */
+  public MockContainer newContainer(NodeId nodeId, Priority priority) {
+    MockContainer container = newContainer(newContainerId());
+    container.setNodeId(nodeId);
+    container.setPriority(priority);
+    return container;
+  }
+
+  /**
+   * Build a new container using the request to supply priority and resource.
+   * @param req request
+   * @param host hostname to assign to
+   * @return the container
+   */
+  public MockContainer newContainer(AMRMClient.ContainerRequest req, String
+      host) {
+    MockContainer container = newContainer(newContainerId());
+    container.setResource(req.getCapability());
+    container.setPriority(req.getPriority());
+    container.setNodeId(new MockNodeId(host));
+    return container;
+  }
+
+  /**
+   * Create a new application instance with the given components defined in
+   * the resources section.
+   * @param r1 container count for role 0 (DEFAULT placement)
+   * @param r2 container count for role 1 (STRICT placement)
+   * @param r3 container count for role 2 (ANYWHERE placement)
+   * @return the application
+   */
+  public Application newApplication(long r1, long r2, long r3) {
+    Application application = new Application();
+    application.getConfiguration().setProperty(ResourceKeys
+        .NODE_FAILURE_THRESHOLD, Integer.toString(NODE_FAILURE_THRESHOLD));
+    List<Component> components = application.getComponents();
+    Component c1 = new Component().name(ROLE0).numberOfContainers(r1);
+    c1.getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
+        Integer.toString(PlacementPolicy.DEFAULT));
+    Component c2 = new Component().name(ROLE1).numberOfContainers(r2);
+    c2.getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
+        Integer.toString(PlacementPolicy.STRICT));
+    Component c3 = new Component().name(ROLE2).numberOfContainers(r3);
+    c3.getConfiguration().setProperty(COMPONENT_PLACEMENT_POLICY,
+        Integer.toString(PlacementPolicy.ANYWHERE));
+    components.add(c1);
+    components.add(c2);
+    components.add(c3);
+    return application;
+  }
+
+  public MockResource newResource(int memory, int vcores) {
+    return new MockResource(memory, vcores);
+  }
+
+  /** @return an empty container status (no ID, no state, exit status 0) */
+  ContainerStatus newContainerStatus() {
+    return newContainerStatus(null, null, "", 0);
+  }
+
+  ContainerStatus newContainerStatus(ContainerId containerId,
+      ContainerState containerState, String diagnostics, int exitStatus) {
+    return ContainerStatus.newInstance(containerId, containerState,
+        diagnostics, exitStatus);
+  }
+
+  /**
+   * Create a single node report.
+   * @param hostname hostname; must be a hexadecimal string
+   * @param nodeState state of the node
+   * @param label node label
+   * @return the report
+   */
+  public NodeReport newNodeReport(String hostname, NodeState nodeState,
+      String label) {
+    NodeId nodeId = NodeId.newInstance(hostname, 80);
+    // enforce the convention that mock hostnames are hex strings:
+    // fails fast with NumberFormatException otherwise
+    Integer.valueOf(hostname, 16);
+    return newNodeReport(hostname, nodeId, nodeState, label);
+  }
+
+  /**
+   * Create a node report for the given node with a single node label.
+   * @param hostname hostname used to build the HTTP address
+   * @param nodeId node ID for the report
+   * @param nodeState state of the node
+   * @param label node label
+   * @return the report
+   */
+  NodeReport newNodeReport(
+      String hostname,
+      NodeId nodeId,
+      NodeState nodeState,
+      String label) {
+    NodeReport report = new NodeReportPBImpl();
+    HashSet<String> nodeLabels = new HashSet<>();
+    nodeLabels.add(label);
+    report.setNodeId(nodeId);
+    report.setNodeLabels(nodeLabels);
+    report.setNodeState(nodeState);
+    // was "http$hostname:80": a leftover Groovy GString which set a
+    // literal dollar sign instead of interpolating the hostname
+    report.setHttpAddress("http://" + hostname + ":80");
+    return report;
+  }
+
+  /**
+   * Create a list of node reports — one for each hostname.
+   * @param hostnames hosts
+   * @param nodeState state for all nodes; defaults to RUNNING when null
+   * @param label node label
+   * @return the reports
+   */
+  public List<NodeReport> createNodeReports(
+      List<String> hostnames, NodeState nodeState, String label) {
+    NodeState state = nodeState == null ? NodeState.RUNNING : nodeState;
+    List<NodeReport> reports = new ArrayList<>();
+    for (String name : hostnames) {
+      reports.add(newNodeReport(name, state, label));
+    }
+    return reports;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java
new file mode 100644
index 0000000..72d1665
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.io.IOException;
+
+/**
+ * File system stub which resolves every path against a fixed HDFS root of
+ * {@code hdfs://localhost/}.
+ */
+class MockFileSystem extends FilterFileSystem {
+
+  @Override
+  public Path resolvePath(Path p) throws IOException {
+    return new Path("hdfs://localhost/", p);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java
new file mode 100644
index 0000000..9d2379a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+/**
+ * Mock node ID: a simple bean-style {@link NodeId} whose host and port are
+ * plain fields.
+ */
+public class MockNodeId extends NodeId {
+  private String host;
+  private int port;
+
+  public MockNodeId() {
+  }
+
+  /** Construct with a host and the default port of 0. */
+  MockNodeId(String host) {
+    this(host, 0);
+  }
+
+  public MockNodeId(String host, int port) {
+    this.host = host;
+    this.port = port;
+  }
+
+  public String getHost() {
+    return host;
+  }
+
+  public void setHost(String host) {
+    this.host = host;
+  }
+
+  public int getPort() {
+    return port;
+  }
+
+  public void setPort(int port) {
+    this.port = port;
+  }
+
+  /** No-op: the mock's fields are assigned directly. */
+  @Override
+  protected void build() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java
new file mode 100644
index 0000000..36f97cc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.Priority;
+
+/**
+ * Mock priority: a mutable {@link Priority} backed by a plain int field.
+ */
+public class MockPriority extends Priority {
+
+  private int priority;
+
+  public MockPriority(int priority) {
+    this.priority = priority;
+  }
+
+  MockPriority() {
+  }
+
+  @Override
+  public int getPriority() {
+    return priority;
+  }
+
+  /**
+   * Store the new priority. The original stub silently discarded the
+   * argument, so any caller mutating priority through the
+   * {@link Priority} API had no effect.
+   */
+  @Override
+  public void setPriority(int priority) {
+    this.priority = priority;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
new file mode 100644
index 0000000..112a5ac
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.LifecycleEvent;
+import org.apache.hadoop.service.ServiceStateChangeListener;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Mock provider service: a no-op implementation of {@link ProviderService}
+ * for tests that need the interface but none of the behavior. Lifecycle
+ * methods do nothing; getters return {@code null}, {@code 0} or
+ * {@code false}.
+ */
+public class MockProviderService implements ProviderService {
+
+  @Override
+  public String getName() {
+    return null;
+  }
+
+  @Override
+  public void init(Configuration config) {
+  }
+
+  @Override
+  public void start() {
+  }
+
+  @Override
+  public void stop() {
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+
+  @Override
+  public void registerServiceListener(ServiceStateChangeListener listener) {
+  }
+
+  @Override
+  public void unregisterServiceListener(ServiceStateChangeListener listener) {
+  }
+
+  @Override
+  public Configuration getConfig() {
+    return null;
+  }
+
+  // NOTE(review): no @Override here, unlike the surrounding methods —
+  // presumably this overrides Service.getServiceState(); confirm and
+  // annotate if so.
+  public STATE getServiceState() {
+    return null;
+  }
+
+  @Override
+  public long getStartTime() {
+    return 0;
+  }
+
+  @Override
+  public boolean isInState(STATE state) {
+    return false;
+  }
+
+  @Override
+  public Throwable getFailureCause() {
+    return null;
+  }
+
+  @Override
+  public STATE getFailureState() {
+    return null;
+  }
+
+  // always reports a timeout: the mock never stops
+  @Override
+  public boolean waitForServiceToStop(long timeout) {
+    return false;
+  }
+
+  @Override
+  public List<LifecycleEvent> getLifecycleHistory() {
+    return null;
+  }
+
+  @Override
+  public Map<String, String> getBlockers() {
+    return null;
+  }
+
+  // no-op: nothing is added to the launch context
+  @Override
+  public void buildContainerLaunchContext(ContainerLauncher containerLauncher,
+      Application application, Container container, ProviderRole providerRole,
+      SliderFileSystem sliderFileSystem) throws IOException, SliderException {
+
+  }
+
+  @Override
+  public void setAMState(StateAccessForProviders stateAccessForProviders) {
+
+  }
+
+  @Override
+  public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
+
+  }
+
+  // reports every container status as not requiring further processing
+  @Override
+  public boolean processContainerStatus(ContainerId containerId,
+      ContainerStatus status) {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java
new file mode 100644
index 0000000..3dd764a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.operations.RMOperationHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Mock RM operation handler which records the operations it receives,
+ * together with request/release/cancel/blacklist counters, for later
+ * inspection by tests.
+ */
+public class MockRMOperationHandler extends RMOperationHandler {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(MockRMOperationHandler.class);
+
+  /** All operations received, in arrival order. */
+  private List<AbstractRMOperation> operations = new ArrayList<>();
+  /** Number of container requests received. */
+  private int requests;
+  /** Number of container releases received. */
+  private int releases;
+  // number available to cancel; this must be explicitly set by the test
+  private int availableToCancel = 0;
+  // count of cancelled requests
+  private int cancelled;
+  // number blacklisted (additions minus removals)
+  private int blacklisted = 0;
+
+  @Override
+  public void releaseAssignedContainer(ContainerId containerId) {
+    operations.add(new ContainerReleaseOperation(containerId));
+    // parameterized logging: no string concatenation when INFO is disabled
+    LOG.info("Releasing container ID {}", containerId.getContainerId());
+    releases++;
+  }
+
+  @Override
+  public void addContainerRequest(AMRMClient.ContainerRequest req) {
+    operations.add(new ContainerRequestOperation(req));
+    LOG.info("Requesting container role #{}", req.getPriority());
+    requests++;
+  }
+
+  /**
+   * Cancel up to {@code count} requests, bounded by the number the test
+   * marked as available to cancel.
+   * @return the number actually cancelled
+   */
+  @Override
+  public int cancelContainerRequests(
+      Priority priority1,
+      Priority priority2,
+      int count) {
+    int releasable = Math.min(count, availableToCancel);
+    availableToCancel -= releasable;
+    cancelled += releasable;
+    return releasable;
+  }
+
+  @Override
+  public void cancelSingleRequest(AMRMClient.ContainerRequest request) {
+    // here assume that there is a copy of this request in the list
+    if (availableToCancel > 0) {
+      availableToCancel--;
+      cancelled++;
+    }
+  }
+
+  @Override
+  public void updateBlacklist(List<String> blacklistAdditions, List<String>
+      blacklistRemovals) {
+    blacklisted += blacklistAdditions.size();
+    blacklisted -= blacklistRemovals.size();
+  }
+
+  /**
+   * Clear the operation history and the request/release counters.
+   * Cancel and blacklist counters are deliberately left untouched.
+   */
+  public void clear() {
+    operations.clear();
+    releases = 0;
+    requests = 0;
+  }
+
+  public AbstractRMOperation getFirstOp() {
+    return operations.get(0);
+  }
+
+  public int getNumReleases() {
+    return releases;
+  }
+
+  /** @return the number of requests cancelled so far (previously the
+   * counter was written but unreadable by tests) */
+  public int getCancelled() {
+    return cancelled;
+  }
+
+  public void setAvailableToCancel(int num) {
+    this.availableToCancel = num;
+  }
+
+  public int getAvailableToCancel() {
+    return availableToCancel;
+  }
+
+  public int getBlacklisted() {
+    return blacklisted;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java
new file mode 100644
index 0000000..eb34586
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+/**
+ * Placeholder for a factory of mock YARN records; currently empty.
+ * (The previous javadoc, "Node report for testing", described a different
+ * class.)
+ */
+class MockRecordFactory {
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java
new file mode 100644
index 0000000..4917f1b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.AbstractService;
+
+import java.util.List;
+
+/**
+ * Simple stub registry for when one is needed for its API, but the operations
+ * are not actually required. Mutating operations succeed silently; the
+ * lookup operations (resolve/stat/list) always throw
+ * {@link PathNotFoundException}.
+ */
+class MockRegistryOperations extends AbstractService implements
+    RegistryOperations {
+
+  MockRegistryOperations() {
+    super("mock");
+  }
+
+  // claims success without creating anything
+  @Override
+  public boolean mknode(String path, boolean createParents) {
+    return true;
+  }
+
+  // discards the record
+  @Override
+  public void bind(String path, ServiceRecord record, int flags) {
+  }
+
+  @Override
+  public ServiceRecord resolve(String path) throws PathNotFoundException {
+    throw new PathNotFoundException(path);
+  }
+
+  @Override
+  public RegistryPathStatus stat(String path) throws PathNotFoundException {
+    throw new PathNotFoundException(path);
+  }
+
+  @Override
+  public boolean exists(String path) {
+    return false;
+  }
+
+  @Override
+  public List<String> list(String path) throws PathNotFoundException {
+    throw new PathNotFoundException(path);
+  }
+
+  @Override
+  public void delete(String path, boolean recursive) {
+
+  }
+
+  // accepts any accessor without recording it
+  @Override
+  public boolean addWriteAccessor(String id, String pass) {
+    return true;
+  }
+
+  @Override
+  public void clearWriteAccessors() {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java
new file mode 100644
index 0000000..3a2ccd7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Mock YARN resource: a plain (memory, vcores) tuple backing the
+ * abstract {@link Resource} accessors, for use in scheduler tests.
+ */
+public class MockResource extends Resource {
+  private int memory;
+  private int virtualCores;
+
+  public MockResource(int memory, int vcores) {
+    this.memory = memory;
+    this.virtualCores = vcores;
+  }
+
+  /**
+   * Order by memory size first, then by virtual cores.
+   * Uses {@link Long#compare} and {@link Integer#compare} rather than
+   * subtraction, so the result is overflow-safe for any operand values.
+   * @param other resource to compare against
+   * @return negative, zero or positive per the {@code compareTo} contract
+   */
+  @Override
+  public int compareTo(Resource other) {
+    int result = Long.compare(getMemorySize(), other.getMemorySize());
+    if (result == 0) {
+      result = Integer.compare(getVirtualCores(), other.getVirtualCores());
+    }
+    return result;
+  }
+
+  @Override
+  public long getMemorySize() {
+    return memory;
+  }
+
+  // Memory is stored as an int; values above Integer.MAX_VALUE are
+  // truncated, which is acceptable for mock-scale clusters.
+  @Override
+  public void setMemorySize(long memorySize) {
+    memory = (int) memorySize;
+  }
+
+  @Override
+  public int getVirtualCores() {
+    return virtualCores;
+  }
+
+  @Override
+  public void setVirtualCores(int vCores) {
+    this.virtualCores = vCores;
+  }
+
+  @Deprecated
+  @Override
+  public int getMemory() {
+    return memory;
+  }
+
+  @Deprecated
+  @Override
+  public void setMemory(int memory) {
+    this.memory = memory;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java
new file mode 100644
index 0000000..8e88b0d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Subclass of {@link RoleHistory} that opens up some of its protected
+ * construction machinery for test use.
+ */
+public class MockRoleHistory extends RoleHistory {
+
+  /**
+   * Build a history from a list of provider roles, creating the
+   * role status entries on the fly.
+   * @param providerRoles provider role list
+   * @throws BadConfigException configuration problem with the role list
+   */
+  public MockRoleHistory(List<ProviderRole> providerRoles) throws
+      BadConfigException {
+    super(convertRoles(providerRoles), new MockClusterServices());
+  }
+
+  /**
+   * Wrap each provider role in a fresh {@link RoleStatus}.
+   * @param providerRoles roles to convert
+   * @return one status per role, in the same order
+   */
+  static List<RoleStatus> convertRoles(List<ProviderRole> providerRoles) {
+    List<RoleStatus> converted = new ArrayList<>(providerRoles.size());
+    for (ProviderRole providerRole : providerRoles) {
+      converted.add(new RoleStatus(providerRole));
+    }
+    return converted;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java
new file mode 100644
index 0000000..bad82bd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.model.mock;
+
+/**
+ * Mock role constants shared by the mock app-state tests.
+ */
+public interface MockRoles {
+
+  // Names for the three standard test roles.
+  String ROLE0 = "role0";
+  String ROLE1 = "role1";
+  String ROLE2 = "role2";
+  // Node label constant ("gpu"); presumably for label-aware placement
+  // tests -- confirm against callers.
+  String LABEL_GPU = "gpu";
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java
new file mode 100644
index 0000000..6b685a0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Models the cluster itself: a set of mock cluster nodes.
+ *
+ * Nodes retain the slot model with a limit of 2^8 slots/host: the
+ * container ID encodes the container slot in its low 8 bits and the
+ * host index in the 16 bits above that (see {@link #makeCid},
+ * {@link #extractHost} and {@link #extractContainer}).
+ *
+ * The NodeID hostname is the host index in hex format; it is parsed
+ * back down to the index to resolve the host.
+ *
+ * Important: container IDs will be reused as containers get recycled.
+ * This is not an attempt to realistically mimic a real YARN cluster,
+ * just simulate it enough for Slider to explore node re-use and its
+ * handling of successful and unsuccessful allocations.
+ *
+ * There is little or no checking of valid parameters in here -this is
+ * for test use, not production.
+ */
+public class MockYarnCluster {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(MockYarnCluster.class);
+
+  private final int clusterSize;
+  private final int containersPerNode;
+  private MockYarnClusterNode[] nodes;
+
+  MockYarnCluster(int clusterSize, int containersPerNode) {
+    this.clusterSize = clusterSize;
+    this.containersPerNode = containersPerNode;
+    build();
+  }
+
+  public int getClusterSize() {
+    return clusterSize;
+  }
+
+  @Override
+  public String toString() {
+    return "MockYarnCluster size=" + clusterSize + ", capacity=" +
+        totalClusterCapacity()+ ", in use=" + containersInUse();
+  }
+
+  /**
+   * Build the cluster: one node per index, each with the configured
+   * number of container slots.
+   */
+  private void build() {
+    nodes = new MockYarnClusterNode[clusterSize];
+    for (int i = 0; i < clusterSize; i++) {
+      nodes[i] = new MockYarnClusterNode(i, containersPerNode);
+    }
+  }
+
+  public MockYarnClusterNode nodeAt(int index) {
+    return nodes[index];
+  }
+
+  // Hostnames are the node index rendered in hex; parse it back.
+  MockYarnClusterNode lookup(String hostname) {
+    int index = Integer.valueOf(hostname, 16);
+    return nodeAt(index);
+  }
+
+  MockYarnClusterNode lookup(NodeId nodeId) {
+    return lookup(nodeId.getHost());
+  }
+
+  MockYarnClusterNode lookupOwner(ContainerId cid) {
+    return nodeAt(extractHost(cid.getContainerId()));
+  }
+
+  /**
+   * Release a container.
+   * @param cid container ID
+   * @return the released container, or null if it was not in use
+   */
+  MockYarnClusterContainer release(ContainerId cid) {
+    int host = extractHost(cid.getContainerId());
+    MockYarnClusterContainer inUse = nodeAt(host).release(cid.getContainerId());
+    LOG.debug("Released {} inuse={}", cid, inUse);
+    return inUse;
+  }
+
+  int containersInUse() {
+    int count = 0;
+    for (MockYarnClusterNode it : nodes) {
+      count += it.containersInUse();
+    }
+    return count;
+  }
+
+  /**
+   * Number of currently free containers across the whole cluster.
+   * @return total capacity minus containers in use
+   */
+  int containersFree() {
+    return totalClusterCapacity() - containersInUse();
+  }
+
+  int totalClusterCapacity() {
+    return clusterSize * containersPerNode;
+  }
+
+  /**
+   * Reset all the containers.
+   */
+  public void reset() {
+    for (MockYarnClusterNode node : nodes) {
+      node.reset();
+    }
+  }
+
+  /**
+   * Bulk allocate the specific number of containers on a range of the cluster.
+   * @param startNode start of the range (inclusive)
+   * @param endNode end of the range (inclusive)
+   * @param count containers requested per node
+   * @return the number actually allocated -it will be less than the count
+   * supplied if a node was full
+   */
+  public int bulkAllocate(int startNode, int endNode, int count) {
+    int total = 0;
+    for (int i = startNode; i <= endNode; i++) {
+      total += nodeAt(i).bulkAllocate(count).size();
+    }
+    return total;
+  }
+
+  /**
+   * Get the list of node reports. These are not cloned; updates will persist
+   * in the nodemap.
+   * @return current node report list
+   */
+  List<NodeReport> getNodeReports() {
+    List<NodeReport> reports = new ArrayList<>();
+
+    for (MockYarnClusterNode n : nodes) {
+      reports.add(n.nodeReport);
+    }
+    return reports;
+  }
+
+  /**
+   * Model cluster nodes on the simpler "slot" model than the YARN-era
+   * resource allocation model. Why? Easier to implement scheduling.
+   * Of course, if someone does want to implement the full process...
+   */
+  public static class MockYarnClusterNode {
+
+    private final int nodeIndex;
+    private final String hostname;
+    private List<String> labels = new ArrayList<>();
+    private final MockNodeId nodeId;
+    private final MockYarnClusterContainer[] containers;
+    private boolean offline;
+    private NodeReport nodeReport;
+
+    public MockYarnClusterNode(int index, int size) {
+      nodeIndex = index;
+      // hostname is the node index in fixed-width hex (see class javadoc)
+      hostname = String.format(Locale.ENGLISH, "%08x", index);
+      nodeId = new MockNodeId(hostname, 0);
+
+      containers = new MockYarnClusterContainer[size];
+      for (int i = 0; i < size; i++) {
+        int cid = makeCid(index, i);
+        MockContainerId mci = new MockContainerId(cid);
+        containers[i] = new MockYarnClusterContainer(mci);
+      }
+
+      nodeReport = MockFactory.INSTANCE.newNodeReport(hostname, nodeId,
+          NodeState.RUNNING, "");
+    }
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public NodeId getNodeId() {
+      return nodeId;
+    }
+
+    /**
+     * Look up a container on this node.
+     * @param containerId container ID; only the low slot bits are used
+     * @return the container in that slot
+     */
+    public MockYarnClusterContainer lookup(int containerId) {
+      return containers[extractContainer(containerId)];
+    }
+
+    /**
+     * Go offline; release all containers.
+     */
+    public void goOffline() {
+      if (!offline) {
+        offline = true;
+        reset();
+      }
+    }
+
+    public void goOnline() {
+      offline = false;
+    }
+
+    /**
+     * Allocate a container -if one is available.
+     * @return the container or null for none free
+     * -or the cluster node is offline
+     */
+    public MockYarnClusterContainer allocate() {
+      if (!offline) {
+        for (int i = 0; i < containers.length; i++) {
+          MockYarnClusterContainer c = containers[i];
+          if (!c.busy) {
+            c.busy = true;
+            return c;
+          }
+        }
+      }
+      return null;
+    }
+
+    /**
+     * Bulk allocate the specific number of containers.
+     * @param count count
+     * @return the list actually allocated -it will be less than the count
+     * supplied if the node was full
+     */
+    public List<MockYarnClusterContainer> bulkAllocate(int count) {
+      List<MockYarnClusterContainer> result = new ArrayList<>();
+      for (int i = 0; i < count; i++) {
+        MockYarnClusterContainer allocation = allocate();
+        if (allocation == null) {
+          break;
+        }
+        result.add(allocation);
+      }
+      return result;
+    }
+
+    /**
+     * Release a container.
+     * @param cid container ID
+     * @return the container if the container was busy before the release,
+     * null otherwise
+     */
+    public MockYarnClusterContainer release(long cid) {
+      MockYarnClusterContainer container = containers[extractContainer(cid)];
+      boolean b = container.busy;
+      container.busy = false;
+      return b ? container : null;
+    }
+
+    /**
+     * HTTP address of this node.
+     * @return a URL built from this node's hostname
+     */
+    public String httpAddress() {
+      // Concatenate the real hostname; the previous "http://$hostname/"
+      // literal was an unconverted Groovy GString and never contained
+      // the actual host name.
+      return "http://" + hostname + "/";
+    }
+
+    /**
+     * Reset all the containers.
+     */
+    public void reset() {
+      for (MockYarnClusterContainer cont : containers) {
+        cont.reset();
+      }
+    }
+
+    public int containersInUse() {
+      int c = 0;
+      for (MockYarnClusterContainer cont : containers) {
+        c += cont.busy ? 1 : 0;
+      }
+      return c;
+    }
+
+    public int containersFree() {
+      return containers.length - containersInUse();
+    }
+  }
+
+  /**
+   * Cluster container: a container ID plus a busy flag.
+   */
+  public static class MockYarnClusterContainer {
+    private MockContainerId cid;
+    private boolean busy;
+
+    MockYarnClusterContainer(MockContainerId cid) {
+      this.cid = cid;
+    }
+
+    public MockContainerId getCid() {
+      return cid;
+    }
+
+    void reset() {
+      busy = false;
+    }
+  }
+
+  /**
+   * Build a container ID: host index in the bits above 8, container
+   * slot in the low 8 bits.
+   */
+  public static int makeCid(int hostIndex, int containerIndex) {
+    return (hostIndex << 8) | containerIndex & 0xff;
+  }
+
+  /**
+   * Extract the host index (the 16 bits above the slot byte) from a
+   * container ID.
+   */
+  public static int extractHost(long cid) {
+    return (int) ((cid >>> 8) & 0xffff);
+  }
+
+  /**
+   * Extract the container slot (low 8 bits) from a container ID.
+   */
+  public static int extractContainer(long cid) {
+    return (int) (cid & 0xff);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java
new file mode 100644
index 0000000..9c5708f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * This is an evolving engine to mock YARN operations.
+ */
+public class MockYarnEngine {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MockYarnEngine.class);
+
+  private MockYarnCluster cluster;
+  private Allocator allocator;
+  // requests that could not be satisfied during an execute() pass
+  private List<ContainerRequestOperation> pending = new ArrayList<>();
+
+  private ApplicationId appId = new MockApplicationId(0, 0);
+
+  private ApplicationAttemptId attemptId = new MockApplicationAttemptId(appId,
+      1);
+
+  @Override
+  public String toString() {
+    return "MockYarnEngine " + cluster + " + pending=" + pending.size();
+  }
+
+  public int containerCount() {
+    return cluster.containersInUse();
+  }
+
+  public MockYarnEngine(int clusterSize, int containersPerNode) {
+    cluster = new MockYarnCluster(clusterSize, containersPerNode);
+    allocator = new Allocator(cluster);
+  }
+
+  public MockYarnCluster getCluster() {
+    return cluster;
+  }
+
+  public Allocator getAllocator() {
+    return allocator;
+  }
+
+  /**
+   * Allocate a container from a request. The containerID will be
+   * unique, nodeId and other fields chosen internally with
+   * no such guarantees; resource and priority copied over
+   * @param request request
+   * @return container, or null if the allocator could not satisfy it
+   */
+  public Container allocateContainer(AMRMClient.ContainerRequest request) {
+    MockContainer allocated = allocator.allocate(request);
+    if (allocated != null) {
+      MockContainerId id = (MockContainerId) allocated.getId();
+      id.setApplicationAttemptId(attemptId);
+    }
+    return allocated;
+  }
+
+  MockYarnCluster.MockYarnClusterContainer releaseContainer(ContainerId
+      containerId) {
+    return cluster.release(containerId);
+  }
+
+  /**
+   * Process a list of operations -release containers to be released,
+   * allocate those for which there is space (but don't rescan the list after
+   * the scan).
+   * @param ops operations to apply
+   * @return the list of all satisfied allocations
+   */
+  public List<Container> execute(List<AbstractRMOperation> ops) {
+    return execute(ops, new ArrayList<>());
+  }
+
+  /**
+   * Process a list of operations -release containers to be released,
+   * allocate those for which there is space (but don't rescan the list after
+   * the scan). Unsatisfied entries are appended to the "pending" list
+   * @param ops operations
+   * @param released list to which released container IDs are appended
+   * @return the list of all satisfied allocations
+   */
+  public List<Container> execute(List<AbstractRMOperation> ops,
+                               List<ContainerId> released) {
+    validateRequests(ops);
+    List<Container> allocation = new ArrayList<>();
+    for (AbstractRMOperation op : ops) {
+      if (op instanceof ContainerReleaseOperation) {
+        ContainerReleaseOperation cro = (ContainerReleaseOperation) op;
+        ContainerId cid = cro.getContainerId();
+        assertNotNull(releaseContainer(cid));
+        released.add(cid);
+      } else if (op instanceof CancelSingleRequest) {
+        // no-op
+        LOG.debug("cancel request {}", op);
+      } else if (op instanceof ContainerRequestOperation) {
+        ContainerRequestOperation req = (ContainerRequestOperation) op;
+        Container container = allocateContainer(req.getRequest());
+        if (container != null) {
+          LOG.info("allocated container {} for {}", container, req);
+          allocation.add(container);
+        } else {
+          LOG.debug("Unsatisfied allocation {}", req);
+          pending.add(req);
+        }
+      } else {
+        LOG.warn("Unsupported operation {}", op);
+      }
+    }
+    return allocation;
+  }
+
+  /**
+   * Try and mimic some of the logic of <code>AMRMClientImpl
+   * .checkLocalityRelaxationConflict</code>: all requests at the same
+   * priority must agree on relax-locality.
+   * @param ops operations list
+   */
+  void validateRequests(List<AbstractRMOperation> ops) {
+    // run through the requests and verify that they are all consistent.
+    List<ContainerRequestOperation> outstandingRequests = new ArrayList<>();
+    for (AbstractRMOperation operation : ops) {
+      if (operation instanceof ContainerRequestOperation) {
+        ContainerRequestOperation containerRequest =
+            (ContainerRequestOperation) operation;
+        ContainerRequest amRequest = containerRequest.getRequest();
+        Priority priority = amRequest.getPriority();
+        boolean relax = amRequest.getRelaxLocality();
+
+        for (ContainerRequestOperation req : outstandingRequests) {
+          // compare priorities by value, not reference
+          if (req.getPriority().equals(priority)
+              && req.getRelaxLocality() != relax) {
+            // mismatch in values
+            Assert.fail("operation " + operation + " has incompatible request"
+                + " priority from outstanding request");
+          }
+        }
+        // Record the request AFTER scanning the earlier ones. The
+        // original code added it inside the scan loop, so the list
+        // stayed empty and the consistency check could never fire
+        // (and adding while iterating would have thrown
+        // ConcurrentModificationException).
+        outstandingRequests.add(containerRequest);
+      }
+    }
+  }
+
+  /**
+   * Get the list of node reports. These are not cloned; updates will persist
+   * in the nodemap.
+   * @return current node report list
+   */
+  List<NodeReport> getNodeReports() {
+    return cluster.getNodeReports();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java
new file mode 100644
index 0000000..31f8822
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.monkey;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.api.InternalKeys;
+import org.apache.slider.server.appmaster.actions.ActionHalt;
+import org.apache.slider.server.appmaster.actions.ActionKillContainer;
+import org.apache.slider.server.appmaster.actions.AsyncAction;
+import org.apache.slider.server.appmaster.actions.QueueService;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.monkey.ChaosKillAM;
+import org.apache.slider.server.appmaster.monkey.ChaosKillContainer;
+import org.apache.slider.server.appmaster.monkey.ChaosMonkeyService;
+import org.apache.slider.server.appmaster.monkey.ChaosTarget;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Test chaos monkey.
+ */
+public class TestMockMonkey extends BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockMonkey.class);
+
+  /**
+   * This queue service is NOT started; tests need to poll the queue
+   * rather than expect them to execute.
+   */
+  private QueueService queues;
+  private ChaosMonkeyService monkey;
+
+  // Fresh queue + monkey per test; neither service is started.
+  @Before
+  public void init() {
+    YarnConfiguration configuration = new YarnConfiguration();
+    queues = new QueueService();
+    queues.init(configuration);
+    monkey = new ChaosMonkeyService(METRICS.getMetrics(), queues);
+    monkey.init(configuration);
+  }
+
+  // Lifecycle smoke test: start/stop must not throw.
+  @Test
+  public void testMonkeyStart() throws Throwable {
+    monkey.start();
+    monkey.stop();
+  }
+
+  // At 100% probability, one play() triggers the target exactly once.
+  @Test
+  public void testMonkeyPlay() throws Throwable {
+    ChaosCounter counter = new ChaosCounter();
+    monkey.addTarget("target", counter, InternalKeys.PROBABILITY_PERCENT_100);
+    assertEquals(1, monkey.getTargetCount());
+    monkey.play();
+    assertEquals(1, counter.count);
+  }
+
+  // Scheduling with a live target queues exactly one action.
+  @Test
+  public void testMonkeySchedule() throws Throwable {
+    ChaosCounter counter = new ChaosCounter();
+    assertEquals(0, monkey.getTargetCount());
+    monkey.addTarget("target", counter, InternalKeys.PROBABILITY_PERCENT_100);
+    assertEquals(1, monkey.getTargetCount());
+    assertTrue(monkey.schedule(0, 1, TimeUnit.SECONDS));
+    assertEquals(1, queues.scheduledActions.size());
+  }
+
+  // Targets registered with probability 0 are dropped outright.
+  @Test
+  public void testMonkeyDoesntAddProb0Actions() throws Throwable {
+    ChaosCounter counter = new ChaosCounter();
+    monkey.addTarget("target", counter, 0);
+    assertEquals(0, monkey.getTargetCount());
+    monkey.play();
+    assertEquals(0, counter.count);
+  }
+
+  // With only probability-0 targets, scheduling declines and queues nothing.
+  @Test
+  public void testMonkeyScheduleProb0Actions() throws Throwable {
+    ChaosCounter counter = new ChaosCounter();
+    monkey.addTarget("target", counter, 0);
+    assertFalse(monkey.schedule(0, 1, TimeUnit.SECONDS));
+    assertEquals(0, queues.scheduledActions.size());
+  }
+
+  @Test
+  public void testMonkeyPlaySometimes() throws Throwable {
+    ChaosCounter counter = new ChaosCounter();
+    ChaosCounter counter2 = new ChaosCounter();
+    // target1 at ~50%, target2 at ~25%
+    monkey.addTarget("target1", counter, InternalKeys.PROBABILITY_PERCENT_1
+        * 50);
+    monkey.addTarget("target2", counter2, InternalKeys
+        .PROBABILITY_PERCENT_1 * 25);
+
+    for (int i = 0; i < 100; i++) {
+      monkey.play();
+    }
+    LOG.info("Counter1 = {} counter2 = {}", counter.count, counter2.count);
+    /*
+     * Relying on probability here to give approximate answers;
+     * the wide 25..75 band for the 50% target keeps flakiness low.
+     */
+    assertTrue(counter.count > 25);
+    assertTrue(counter.count < 75);
+    assertTrue(counter2.count < counter.count);
+  }
+
+  // Killing the AM queues a single halt action.
+  @Test
+  public void testAMKiller() throws Throwable {
+
+    ChaosKillAM chaos = new ChaosKillAM(queues, -1);
+    chaos.chaosAction();
+    assertEquals(1, queues.scheduledActions.size());
+    AsyncAction action = queues.scheduledActions.take();
+    assertTrue(action instanceof ActionHalt);
+  }
+
+  // With no live containers there is nothing to kill, so no action queued.
+  @Test
+  public void testContainerKillerEmptyApp() throws Throwable {
+
+
+    ChaosKillContainer chaos = new ChaosKillContainer(appState,
+        queues,
+        new MockRMOperationHandler());
+    chaos.chaosAction();
+    assertEquals(0, queues.scheduledActions.size());
+  }
+
+  @Ignore
+  @Test
+  public void testContainerKillerIgnoresAM() throws Throwable {
+    // TODO: AM needed in live container list?
+    addAppMastertoAppState();
+    assertEquals(1, appState.getLiveContainers().size());
+
+    ChaosKillContainer chaos = new ChaosKillContainer(appState,
+        queues,
+        new MockRMOperationHandler());
+    chaos.chaosAction();
+    assertEquals(0, queues.scheduledActions.size());
+  }
+
+  // End-to-end: kill action targets the one live container, and
+  // executing it issues exactly one release to the RM handler.
+  @Test
+  public void testContainerKiller() throws Throwable {
+    MockRMOperationHandler ops = new MockRMOperationHandler();
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(1, instances.size());
+    RoleInstance instance = instances.get(0);
+
+    ChaosKillContainer chaos = new ChaosKillContainer(appState, queues, ops);
+    chaos.chaosAction();
+    assertEquals(1, queues.scheduledActions.size());
+    AsyncAction action = queues.scheduledActions.take();
+    ActionKillContainer killer = (ActionKillContainer) action;
+    assertEquals(killer.getContainerId(), instance.getContainerId());
+    killer.execute(null, queues, appState);
+    assertEquals(1, ops.getNumReleases());
+
+    ContainerReleaseOperation operation = (ContainerReleaseOperation) ops
+        .getFirstOp();
+    assertEquals(operation.getContainerId(), instance.getContainerId());
+  }
+
+  /**
+   * Chaos target that just implements a counter.
+   */
+  private static class ChaosCounter implements ChaosTarget {
+    private int count;
+
+    @Override
+    public void chaosAction() {
+      count++;
+    }
+
+
+    @Override
+    public String toString() {
+      final StringBuilder sb = new StringBuilder(
+          "ChaosCounter{");
+      sb.append("count=").append(count);
+      sb.append('}');
+      return sb.toString();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java
new file mode 100644
index 0000000..5a19a3a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.core.exceptions.SliderException;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test security configuration.
+ */
+public class TestSecurityConfiguration {
+
+  @Test
+  public void testValidLocalConfiguration() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test");
+    compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+        "/some/local/path");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+  }
+
+  @Test
+  public void testValidDistributedConfiguration() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test");
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+  }
+
+  @Test
+  public void testMissingPrincipalNoLoginWithDistributedConfig() throws
+      Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    try {
+      SecurityConfiguration securityConfiguration =
+          new SecurityConfiguration(config, application, "testCluster") {
+            @Override
+            protected UserGroupInformation getLoginUser() throws
+                IOException {
+              return null;
+            }
+          };
+      fail("expected SliderException");
+    } catch (SliderException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testMissingPrincipalNoLoginWithLocalConfig() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+        "/some/local/path");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    try {
+      SecurityConfiguration securityConfiguration =
+          new SecurityConfiguration(config, application, "testCluster") {
+            @Override
+            protected UserGroupInformation getLoginUser() throws IOException {
+              return null;
+            }
+          };
+      fail("expected SliderException");
+    } catch (SliderException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testBothKeytabMechanismsConfigured() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test");
+    compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+        "/some/local/path");
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    try {
+      SecurityConfiguration securityConfiguration =
+          new SecurityConfiguration(config, application,
+              "testCluster");
+      fail("expected SliderException");
+    } catch (SliderException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testMissingPrincipalButLoginWithDistributedConfig() throws
+      Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+  }
+
+  @Test
+  public void testMissingPrincipalButLoginWithLocalConfig() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+        "/some/local/path");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+  }
+
+  @Test
+  public void testKeypathLocationOnceLocalized() throws Throwable {
+    Configuration config = new Configuration();
+    config.set(CommonConfigurationKeysPublic
+        .HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+
+    assertEquals(new File(SliderKeys.KEYTAB_DIR, "some.keytab")
+            .getAbsolutePath(),
+        securityConfiguration.getKeytabFile().getAbsolutePath());
+  }
+
+  @Test
+  public void testAMKeytabProvided() throws Throwable {
+    Configuration config = new Configuration();
+    Map<String, String> compOps = new HashMap<>();
+    compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, " ");
+    Application application = new Application().configuration(new org.apache
+        .slider.api.resource.Configuration().properties(compOps));
+
+    SecurityConfiguration securityConfiguration =
+        new SecurityConfiguration(config, application, "testCluster");
+    assertFalse(securityConfiguration.isKeytabProvided());
+
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "");
+    assertFalse(securityConfiguration.isKeytabProvided());
+
+    compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab");
+    assertTrue(securityConfiguration.isKeytabProvided());
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/50] [abbrv] hadoop git commit: YARN-6545. Followup fix for YARN-6405. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6545. Followup fix for YARN-6405. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d9690f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d9690f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d9690f2

Branch: refs/heads/yarn-native-services
Commit: 0d9690f25cfffa3bf375fe6c1d5af205f566b313
Parents: 56b6c2b
Author: Billie Rinaldi <bi...@apache.org>
Authored: Tue May 9 09:26:00 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../apache/slider/api/ServiceApiConstants.java  | 19 +++++++
 .../slider/api/resource/Configuration.java      | 26 +++-------
 .../org/apache/slider/client/SliderClient.java  | 26 +++++-----
 .../slider/common/tools/CoreFileSystem.java     | 47 ------------------
 .../core/registry/docstore/ConfigFormat.java    |  2 +-
 .../apache/slider/core/zk/ZKIntegration.java    |  2 +-
 .../apache/slider/providers/ProviderRole.java   | 14 ++----
 .../apache/slider/providers/ProviderUtils.java  | 43 ++++++----------
 .../providers/docker/DockerProviderService.java |  8 +--
 .../server/appmaster/RoleLaunchService.java     |  4 +-
 .../server/appmaster/SliderAppMaster.java       |  1 +
 .../slider/server/appmaster/state/AppState.java | 52 ++++++++++++++++----
 .../appmaster/state/AppStateBindingInfo.java    |  2 +
 .../appmaster/state/ProviderAppState.java       | 12 +++++
 .../server/appmaster/state/RoleInstance.java    |  8 +--
 .../state/StateAccessForProviders.java          | 12 +++++
 .../appstate/TestMockAppStateRMOperations.java  | 48 ++++++++++++++++++
 .../appstate/TestMockAppStateUniqueNames.java   |  2 +-
 .../model/mock/BaseMockAppStateTest.java        |  6 +--
 .../runtime/DockerLinuxContainerRuntime.java    |  2 +
 20 files changed, 194 insertions(+), 142 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
index 5f76f19..da87e3a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java
@@ -36,6 +36,10 @@ public interface ServiceApiConstants {
 
   String SERVICE_NAME_LC = $("SERVICE_NAME.lc");
 
+  String USER = $("USER");
+
+  String DOMAIN = $("DOMAIN");
+
   // Constants for component
   String COMPONENT_NAME = $("COMPONENT_NAME");
 
@@ -47,4 +51,19 @@ public interface ServiceApiConstants {
   String COMPONENT_ID = $("COMPONENT_ID");
 
   String CONTAINER_ID = $("CONTAINER_ID");
+
+  // Constants for default cluster ZK
+  String CLUSTER_ZK_QUORUM = $("CLUSTER_ZK_QUORUM");
+
+  // URI for the default cluster fs
+  String CLUSTER_FS_URI = $("CLUSTER_FS_URI");
+
+  // the host component of the cluster fs UI
+  String CLUSTER_FS_HOST = $("CLUSTER_FS_HOST");
+
+  // Path in zookeeper for a specific service
+  String SERVICE_ZK_PATH = $("SERVICE_ZK_PATH");
+
+  // Constants for service specific hdfs dir
+  String SERVICE_HDFS_DIR = $("SERVICE_HDFS_DIR");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
index 0df586c..37d1a40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -105,10 +105,7 @@ public class Configuration implements Serializable {
   }
 
   public long getPropertyLong(String name, long defaultValue) {
-    if (name == null) {
-      return defaultValue;
-    }
-    String value = properties.get(name.trim());
+    String value = getProperty(name);
     if (StringUtils.isEmpty(value)) {
       return defaultValue;
     }
@@ -116,10 +113,7 @@ public class Configuration implements Serializable {
   }
 
   public int getPropertyInt(String name, int defaultValue) {
-    if (name == null) {
-      return defaultValue;
-    }
-    String value = properties.get(name.trim());
+    String value = getProperty(name);
     if (StringUtils.isEmpty(value)) {
       return defaultValue;
     }
@@ -127,10 +121,7 @@ public class Configuration implements Serializable {
   }
 
   public boolean getPropertyBool(String name, boolean defaultValue) {
-    if (name == null) {
-      return defaultValue;
-    }
-    String value = properties.get(name.trim());
+    String value = getProperty(name);
     if (StringUtils.isEmpty(value)) {
       return defaultValue;
     }
@@ -138,10 +129,11 @@ public class Configuration implements Serializable {
   }
 
   public String getProperty(String name, String defaultValue) {
-    if (name == null) {
+    String value = getProperty(name);
+    if (StringUtils.isEmpty(value)) {
       return defaultValue;
     }
-    return properties.get(name.trim());
+    return value;
   }
 
   public void setProperty(String name, String value) {
@@ -149,16 +141,10 @@ public class Configuration implements Serializable {
   }
 
   public String getProperty(String name) {
-    if (name == null) {
-      return null;
-    }
     return properties.get(name.trim());
   }
 
   public String getEnv(String name) {
-    if (name == null) {
-      return null;
-    }
     return env.get(name.trim());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 7241374..83b4841 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -1112,18 +1112,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
           "not a directory", folder);
     }
 
-    for (File f : files) {
-      srcFile = new Path(f.toURI());
-
-      Path fileInFs = new Path(pkgPath, srcFile.getName());
-      log.info("Installing file {} at {} and overwrite is {}.",
-          srcFile, fileInFs, resourceInfo.overwrite);
-      require(!(sfs.exists(fileInFs) && !resourceInfo.overwrite),
-          "File exists at %s. Use --overwrite to overwrite.", fileInFs.toUri());
-
-      sfs.copyFromLocalFile(false, resourceInfo.overwrite, srcFile, fileInFs);
-      sfs.setPermission(fileInFs,
-          new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));
+    if (files != null) {
+      for (File f : files) {
+        srcFile = new Path(f.toURI());
+
+        Path fileInFs = new Path(pkgPath, srcFile.getName());
+        log.info("Installing file {} at {} and overwrite is {}.",
+            srcFile, fileInFs, resourceInfo.overwrite);
+        require(!(sfs.exists(fileInFs) && !resourceInfo.overwrite),
+            "File exists at %s. Use --overwrite to overwrite.", fileInFs.toUri());
+
+        sfs.copyFromLocalFile(false, resourceInfo.overwrite, srcFile, fileInFs);
+        sfs.setPermission(fileInFs,
+            new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));
+      }
     }
 
     return EXIT_SUCCESS;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
index 5f5e611..0c249d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
@@ -597,53 +597,6 @@ public class CoreFileSystem {
     providerResources.put(SliderKeys.SLIDER_DEPENDENCY_LOCALIZED_DIR_LINK, lc);
   }
 
-  /**
-   * Copy local file(s) to destination HDFS directory. If {@code localPath} is a
-   * local directory then all files matching the {@code filenameFilter}
-   * (optional) are copied, otherwise {@code filenameFilter} is ignored.
-   * 
-   * @param localPath
-   *          a local file or directory path
-   * @param filenameFilter
-   *          if {@code localPath} is a directory then filenameFilter is used as
-   *          a filter (if specified)
-   * @param destDir
-   *          the destination HDFS directory where the file(s) should be copied
-   * @param fp
-   *          file permissions of all the directories and files that will be
-   *          created in this api
-   * @throws IOException
-   */
-  public void copyLocalFilesToHdfs(File localPath,
-      FilenameFilter filenameFilter, Path destDir, FsPermission fp)
-      throws IOException {
-    if (localPath == null || destDir == null) {
-      throw new IOException("Either localPath or destDir is null");
-    }
-    fileSystem.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
-        "000");
-    fileSystem.mkdirs(destDir, fp);
-    if (localPath.isDirectory()) {
-      // copy all local files under localPath to destDir (honoring filename
-      // filter if provided
-      File[] localFiles = localPath.listFiles(filenameFilter);
-      Path[] localFilePaths = new Path[localFiles.length];
-      int i = 0;
-      for (File localFile : localFiles) {
-        localFilePaths[i++] = new Path(localFile.getPath());
-      }
-      log.info("Copying {} files from {} to {}", i, localPath.toURI(),
-          destDir.toUri());
-      fileSystem.copyFromLocalFile(false, true, localFilePaths, destDir);
-    } else {
-      log.info("Copying file {} to {}", localPath.toURI(), destDir.toUri());
-      fileSystem.copyFromLocalFile(false, true, new Path(localPath.getPath()),
-          destDir);
-    }
-    // set permissions for all the files created in the destDir
-    fileSystem.setPermission(destDir, fp);
-  }
-
   public void copyLocalFileToHdfs(File localPath,
       Path destPath, FsPermission fp)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
index 723b975..081688b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
@@ -25,7 +25,7 @@ public enum ConfigFormat {
   JSON("json"),
   PROPERTIES("properties"),
   XML("xml"),
-  HADOOP_XML("hadoop-xml"),
+  HADOOP_XML("hadoop_xml"),
   ENV("env"),
   TEMPLATE("template"),
   YAML("yaml"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
index 4302530..519cd16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
@@ -52,7 +52,7 @@ public class ZKIntegration implements Watcher, Closeable {
   public static final String SVC_SLIDER = "/" + ZK_SERVICES + "/" + ZK_SLIDER;
   public static final String SVC_SLIDER_USERS = SVC_SLIDER + "/" + ZK_USERS;
 
-  public static final List<String> ZK_USERS_PATH_LIST = new ArrayList<String>();
+  private static final List<String> ZK_USERS_PATH_LIST = new ArrayList<String>();
   static {
     ZK_USERS_PATH_LIST.add(ZK_SERVICES);
     ZK_USERS_PATH_LIST.add(ZK_SLIDER);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index 9cc48e1..182e956 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -20,10 +20,8 @@ package org.apache.slider.providers;
 
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Component;
-import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
 
-import java.util.LinkedList;
-import java.util.List;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicLong;
@@ -44,8 +42,7 @@ public final class ProviderRole {
   public final String labelExpression;
   public final Component component;
   public AtomicLong componentIdCounter = null;
-  public AppState appState;
-  public Queue<String> failedInstanceName = new ConcurrentLinkedQueue<String>();
+  public Queue<RoleInstance> failedInstances = new ConcurrentLinkedQueue<>();
   public ProviderRole(String name, int id) {
     this(name,
         id,
@@ -78,7 +75,7 @@ public final class ProviderRole {
         nodeFailureThreshold,
         placementTimeoutSeconds,
         labelExpression,
-        new Component().name(name).numberOfContainers(0L), null);
+        new Component().name(name).numberOfContainers(0L));
   }
 
   /**
@@ -88,13 +85,13 @@ public final class ProviderRole {
    * @param id ID. This becomes the YARN priority
    * @param policy placement policy
    * @param nodeFailureThreshold threshold for node failures (within a reset interval)
-* after which a node failure is considered an app failure
+   * after which a node failure is considered an app failure
    * @param placementTimeoutSeconds for lax placement, timeout in seconds before
    * @param labelExpression label expression for requests; may be null
    */
   public ProviderRole(String name, String group, int id, int policy,
       int nodeFailureThreshold, long placementTimeoutSeconds,
-      String labelExpression, Component component, AppState state) {
+      String labelExpression, Component component) {
     this.name = name;
     if (group == null) {
       this.group = name;
@@ -110,7 +107,6 @@ public final class ProviderRole {
     if(component.getUniqueComponentSupport()) {
       componentIdCounter = new AtomicLong(0);
     }
-    this.appState = state;
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index d384585..beeaa55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.slider.api.ClusterNode;
-import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.RoleKeys;
 import org.apache.slider.api.resource.Application;
@@ -59,7 +58,6 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -271,8 +269,8 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   // 2. Add the config file to localResource
   public synchronized void createConfigFileAndAddLocalResource(
       ContainerLauncher launcher, SliderFileSystem fs, Component component,
-      Map<String, String> tokensForSubstitution, RoleInstance roleInstance)
-      throws IOException {
+      Map<String, String> tokensForSubstitution, RoleInstance roleInstance,
+      StateAccessForProviders appState) throws IOException {
     Path compDir =
         new Path(new Path(fs.getAppDir(), "components"), component.getName());
     Path compInstanceDir =
@@ -315,12 +313,12 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
           case HADOOP_XML:
             // Hadoop_xml_template
             resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(),
-                tokensForSubstitution, configFile, remoteFile, roleInstance);
+                tokensForSubstitution, configFile, remoteFile, appState);
             break;
           case TEMPLATE:
             // plain-template
             resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(),
-                tokensForSubstitution, configFile, remoteFile, roleInstance);
+                tokensForSubstitution, configFile, remoteFile, appState);
             break;
           default:
             log.info("Not supporting loading src_file for " + configFile);
@@ -383,11 +381,11 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   @SuppressWarnings("unchecked")
   private void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs,
       Map<String, String> tokensForSubstitution, ConfigFile configFile,
-      Path remoteFile, RoleInstance roleInstance) throws IOException {
+      Path remoteFile, StateAccessForProviders appState) throws IOException {
     Map<String, String> conf;
     try {
-      conf = (Map<String, String>) roleInstance.providerRole.
-          appState.configFileCache.get(configFile);
+      conf = (Map<String, String>) appState.getConfigFileCache()
+          .get(configFile);
     } catch (ExecutionException e) {
       log.info("Failed to load config file: " + configFile, e);
       return;
@@ -426,17 +424,16 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   // 3) save on hdfs
   private void resolvePlainTemplateAndSaveOnHdfs(FileSystem fs,
       Map<String, String> tokensForSubstitution, ConfigFile configFile,
-      Path remoteFile, RoleInstance roleInstance) {
+      Path remoteFile, StateAccessForProviders appState) {
     String content;
     try {
-      content = (String) roleInstance.providerRole.appState.configFileCache
-          .get(configFile);
+      content = (String) appState.getConfigFileCache().get(configFile);
     } catch (ExecutionException e) {
       log.info("Failed to load config file: " + configFile, e);
       return;
     }
     // substitute tokens
-    substituteStrWithTokens(content, tokensForSubstitution);
+    content = substituteStrWithTokens(content, tokensForSubstitution);
 
     try (OutputStream output = fs.create(remoteFile)) {
       org.apache.commons.io.IOUtils.write(content, output);
@@ -446,25 +443,13 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   }
 
   /**
-   * Get initial token map to be substituted into config values.
-   * @param appConf app configurations
-   * @param clusterName app name
+   * Get initial component token map to be substituted into config values.
+   * @param roleInstance role instance
    * @return tokens to replace
    */
-  public Map<String, String> getStandardTokenMap(Configuration appConf,
-      RoleInstance roleInstance, String clusterName) {
-
+  public Map<String, String> initCompTokensForSubstitute(
+      RoleInstance roleInstance) {
     Map<String, String> tokens = new HashMap<>();
-
-    String nnuri = appConf.getProperty("fs.defaultFS");
-    if (nnuri != null && !nnuri.isEmpty()) {
-      tokens.put("${NN_URI}", nnuri);
-      tokens.put("${NN_HOST}", URI.create(nnuri).getHost());
-    }
-    tokens.put("${ZK_HOST}", appConf.getProperty(OptionKeys.ZOOKEEPER_HOSTS));
-    tokens.put("${DEFAULT_ZK_PATH}", appConf.getProperty(OptionKeys.ZOOKEEPER_PATH));
-    tokens.put(SERVICE_NAME_LC, clusterName.toLowerCase());
-    tokens.put(SERVICE_NAME, clusterName);
     tokens.put(COMPONENT_NAME, roleInstance.role);
     tokens.put(COMPONENT_NAME_LC, roleInstance.role.toLowerCase());
     tokens.put(COMPONENT_INSTANCE_NAME, roleInstance.getCompInstanceName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index 482bb27..12c2b04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -88,10 +88,10 @@ public class DockerProviderService extends AbstractService
 
     // Generate tokens (key-value pair) for config substitution.
     // Get pre-defined tokens
+    Map<String, String> globalTokens = amState.getGlobalSubstitutionTokens();
     Map<String, String> tokensForSubstitution = providerUtils
-        .getStandardTokenMap(application.getConfiguration(), roleInstance,
-            application.getName());
-
+        .initCompTokensForSubstitute(roleInstance);
+    tokensForSubstitution.putAll(globalTokens);
     // Set the environment variables in launcher
     launcher.putEnv(SliderUtils
         .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
@@ -111,7 +111,7 @@ public class DockerProviderService extends AbstractService
 
     // create config file on hdfs and add local resource
     providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
-        component, tokensForSubstitution, roleInstance);
+        component, tokensForSubstitution, roleInstance, amState);
 
     // substitute launch command
     String launchCommand = ProviderUtils

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index c53349f..7c096c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -150,10 +150,10 @@ public class RoleLaunchService
         containerLauncher.setupUGI();
         containerLauncher.putEnv(envVars);
 
-        String failedInstance = role.failedInstanceName.poll();
+        RoleInstance failedInstance = role.failedInstances.poll();
         RoleInstance instance;
         if (failedInstance != null) {
-          instance = new RoleInstance(container, role, failedInstance);
+          instance = new RoleInstance(container, failedInstance);
         } else {
           instance = new RoleInstance(container, role);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index ffa07fb..ae03b45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -819,6 +819,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       binding.releaseSelector =  new MostRecentContainerReleaseSelector();
       binding.nodeReports = nodeReports;
       binding.application = application;
+      binding.serviceHdfsDir = fs.buildClusterDirPath(appName).toString();
       appState.buildInstance(binding);
 
       // build up environment variables that the AM wants set in every container

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 9f7b4a8..1e1b377 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -24,11 +24,11 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.InternalKeys;
+import org.apache.slider.api.ServiceApiConstants;
 import org.apache.slider.api.StatusKeys;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.proto.Messages.ComponentCountProto;
@@ -61,6 +62,7 @@ import org.apache.slider.core.exceptions.ErrorStrings;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.exceptions.SliderInternalStateException;
 import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.core.zk.ZKIntegration;
 import org.apache.slider.providers.PlacementPolicy;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
@@ -75,6 +77,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -89,7 +92,12 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.DEFAULT_REGISTRY_ZK_QUORUM;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_DOMAIN;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
 import static org.apache.slider.api.ResourceKeys.*;
+import static org.apache.slider.api.ServiceApiConstants.*;
 import static org.apache.slider.api.StateValues.*;
 import static org.apache.slider.api.resource.ApplicationState.STARTED;
 
@@ -193,14 +201,13 @@ public class AppState {
   private int containerMinMemory;
 
   private RoleHistory roleHistory;
-  private Configuration publishedProviderConf;
   private long startTimeThreshold;
 
   private int failureThreshold = 10;
   private int nodeFailureThreshold = 3;
 
   private String logServerURL = "";
-
+  public Map<String, String> globalTokens = new HashMap<>();
   /**
    * Selector of containers to release; application wide.
    */
@@ -335,6 +342,7 @@ public class AppState {
         DEFAULT_CONTAINER_FAILURE_THRESHOLD);
     nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
         DEFAULT_NODE_FAILURE_THRESHOLD);
+    initGlobalTokensForSubstitute(binding);
 
     //build the initial component list
     int priority = 1;
@@ -367,6 +375,34 @@ public class AppState {
     createConfigFileCache(binding.fs);
   }
 
+  private void initGlobalTokensForSubstitute(AppStateBindingInfo binding)
+      throws IOException {
+    // ZK
+    globalTokens.put(ServiceApiConstants.CLUSTER_ZK_QUORUM,
+        binding.serviceConfig
+            .getTrimmed(KEY_REGISTRY_ZK_QUORUM, DEFAULT_REGISTRY_ZK_QUORUM));
+    String user = UserGroupInformation.getCurrentUser().getShortUserName();
+    globalTokens
+        .put(SERVICE_ZK_PATH, ZKIntegration.mkClusterPath(user, app.getName()));
+
+    globalTokens.put(ServiceApiConstants.USER, user);
+    String dnsDomain = binding.serviceConfig.getTrimmed(KEY_DNS_DOMAIN);
+    if (dnsDomain != null && !dnsDomain.isEmpty()) {
+      globalTokens.put(ServiceApiConstants.DOMAIN, dnsDomain);
+    }
+    // HDFS
+    String clusterFs = binding.serviceConfig.getTrimmed(FS_DEFAULT_NAME_KEY);
+    if (clusterFs != null && !clusterFs.isEmpty()) {
+      globalTokens.put(ServiceApiConstants.CLUSTER_FS_URI, clusterFs);
+      globalTokens.put(ServiceApiConstants.CLUSTER_FS_HOST,
+          URI.create(clusterFs).getHost());
+    }
+    globalTokens.put(SERVICE_HDFS_DIR, binding.serviceHdfsDir);
+    // service name
+    globalTokens.put(SERVICE_NAME_LC, app.getName().toLowerCase());
+    globalTokens.put(SERVICE_NAME, app.getName());
+  }
+
   private void createConfigFileCache(final FileSystem fileSystem) {
     this.configFileCache =
         CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES)
@@ -411,7 +447,7 @@ public class AppState {
         DEF_YARN_LABEL_EXPRESSION);
     ProviderRole newRole =
         new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
-            placementTimeout, label, component, this);
+            placementTimeout, label, component);
     buildRole(newRole, component);
     log.info("Created a new role " + newRole);
     return newRole;
@@ -1300,8 +1336,7 @@ public class AppState {
         try {
           RoleStatus roleStatus = lookupRoleStatus(roleInstance.roleId);
           decRunningContainers(roleStatus);
-          roleStatus.getProviderRole().failedInstanceName
-              .offer(roleInstance.compInstanceName);
+          roleStatus.getProviderRole().failedInstances.offer(roleInstance);
           boolean shortLived = isShortLived(roleInstance);
           String message;
           Container failedContainer = roleInstance.container;
@@ -1742,8 +1777,7 @@ public class AppState {
         for (RoleInstance possible : finalCandidates) {
           log.info("Targeting for release: {}", possible);
           containerReleaseSubmitted(possible.container);
-          role.getProviderRole().failedInstanceName
-              .offer(possible.compInstanceName);
+          role.getProviderRole().failedInstances.offer(possible);
           operations.add(new ContainerReleaseOperation(possible.getContainerId()));
         }
       }
@@ -1862,7 +1896,6 @@ public class AppState {
       //get the role
       final ContainerId cid = container.getId();
       final RoleStatus role = lookupRoleStatus(container);
-      decRequestedContainers(role);
 
       //inc allocated count -this may need to be dropped in a moment,
       // but us needed to update the logic below
@@ -1888,6 +1921,7 @@ public class AppState {
         role.getComponentMetrics().surplusContainers.incr();
         containersRunning.decr();
       } else {
+        decRequestedContainers(role);
         log.info("Assigning role {} to container" + " {}," + " on {}:{},",
             roleName, cid, nodeId.getHost(), nodeId.getPort());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
index 2dfded8..ac9b8eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.tools.CoreFileSystem;
 import org.apache.slider.providers.ProviderRole;
 
 import java.util.ArrayList;
@@ -45,6 +46,7 @@ public class AppStateBindingInfo {
   public Path historyPath;
   public List<Container> liveContainers = new ArrayList<>(0);
   public ContainerReleaseSelector releaseSelector = new SimpleReleaseSelector();
+  public String serviceHdfsDir = "";
   /** node reports off the RM. */
   public List<NodeReport> nodeReports = new ArrayList<>(0);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
index 8046472..8fc08b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
@@ -18,11 +18,13 @@
 
 package org.apache.slider.server.appmaster.state;
 
+import com.google.common.cache.LoadingCache;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.NodeInformation;
@@ -262,4 +264,14 @@ public class ProviderAppState implements StateAccessForProviders {
   public RoleStatistics getRoleStatistics() {
     return appState.getRoleStatistics();
   }
+
+  @Override
+  public Map<String, String> getGlobalSubstitutionTokens() {
+    return appState.globalTokens;
+  }
+
+  @Override
+  public LoadingCache<ConfigFile, Object> getConfigFileCache() {
+    return appState.configFileCache;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
index 736dfd1..9ac26b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java
@@ -128,11 +128,11 @@ public final class RoleInstance implements Cloneable {
     this.providerRole = role;
   }
 
-  public RoleInstance(Container container, ProviderRole role,
-      String compInstanceName) {
+  public RoleInstance(Container container, RoleInstance failedInstance) {
     this(container);
-    this.compInstanceName = compInstanceName;
-    this.providerRole = role;
+    this.componentId = failedInstance.componentId;
+    this.compInstanceName = failedInstance.compInstanceName;
+    this.providerRole = failedInstance.providerRole;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
index 5bc6dce..90221cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
@@ -18,12 +18,14 @@
 
 package org.apache.slider.server.appmaster.state;
 
+import com.google.common.cache.LoadingCache;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.StatusKeys;
 import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.NodeInformation;
@@ -260,4 +262,14 @@ public interface StateAccessForProviders {
    * @return role statistics
    */
   RoleStatistics getRoleStatistics();
+
+  /**
+   * Get global substitution tokens.
+   */
+  Map<String, String> getGlobalSubstitutionTokens();
+
+  /**
+   * Get config file cache.
+   */
+  LoadingCache<ConfigFile, Object> getConfigFileCache();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
index 2d87be6..363c551 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
@@ -379,4 +379,52 @@ public class TestMockAppStateRMOperations extends BaseMockAppStateTest
     assertNull(ri3);
   }
 
+  @Test
+  public void testDoubleAllocate() throws Throwable {
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
+    AMRMClient.ContainerRequest request = operation.getRequest();
+    Container cont = engine.allocateContainer(request);
+    List<Container> allocated = new ArrayList<>();
+    allocated.add(cont);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> operations = new ArrayList<>();
+    assertEquals(0L, getRole0Status().getRunning());
+    assertEquals(1L, getRole0Status().getRequested());
+    appState.onContainersAllocated(allocated, assignments, operations);
+
+    assertListLength(ops, 1);
+    assertListLength(assignments, 1);
+    ContainerAssignment assigned = assignments.get(0);
+    Container target = assigned.container;
+    assertEquals(target.getId(), cont.getId());
+    int roleId = assigned.role.getPriority();
+    assertEquals(roleId, extractRole(request.getPriority()));
+    assertEquals(assigned.role.getName(), ROLE0);
+    RoleInstance ri = roleInstance(assigned);
+    //tell the app it arrived
+    appState.containerStartSubmitted(target, ri);
+    appState.innerOnNodeManagerContainerStarted(target.getId());
+    assertEquals(1L, getRole0Status().getRunning());
+    assertEquals(0L, getRole0Status().getRequested());
+
+    // now get an extra allocation that should be released
+    cont = engine.allocateContainer(request);
+    allocated = new ArrayList<>();
+    allocated.add(cont);
+    assignments = new ArrayList<>();
+    operations = new ArrayList<>();
+    appState.onContainersAllocated(allocated, assignments, operations);
+
+    assertListLength(operations, 1);
+    assertTrue(operations.get(0) instanceof ContainerReleaseOperation);
+    ContainerReleaseOperation release = (ContainerReleaseOperation)
+        operations.get(0);
+    assertEquals(release.getContainerId(), cont.getId());
+
+    assertEquals(1L, getRole0Status().getRunning());
+    assertEquals(0L, getRole0Status().getRequested());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
index 54ffe17..b7e967f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -99,6 +99,7 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
       assertEquals(roles[i], entry.getKey());
       RoleInstance instance = entry.getValue();
       assertEquals(roles[i], instance.compInstanceName);
+      assertEquals(i, instance.componentId);
       assertEquals(group, instance.role);
       assertEquals(group, instance.providerRole.name);
       assertEquals(group, instance.providerRole.group);
@@ -129,7 +130,6 @@ public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
     createAndStartNodes();
     instances = appState.cloneOwnedContainerList();
     verifyInstances(instances, "group1", "group10", "group11", "group12");
-    // fails because the names continue at N+1, with group12, group13, group14
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
index 4352959..69abccf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
@@ -176,11 +176,11 @@ public abstract class BaseMockAppStateTest extends SliderTestBase implements
    */
   public RoleInstance roleInstance(ContainerAssignment assigned) {
     Container target = assigned.container;
-    String failedInstance =
-        assigned.role.getProviderRole().failedInstanceName.poll();
+    RoleInstance failedInstance =
+        assigned.role.getProviderRole().failedInstances.poll();
     RoleInstance ri;
     if (failedInstance != null) {
-      ri = new RoleInstance(target,  assigned.role.getProviderRole(), failedInstance);
+      ri = new RoleInstance(target, failedInstance);
     } else {
       ri = new RoleInstance(target, assigned.role.getProviderRole());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9690f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index e058d6e..57dadb2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -627,6 +627,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
         LOG.error("Incorrect format for ip and host");
         return null;
       }
+      // strip off quotes if any
+      output = output.replaceAll("['\"]", "");
       String ips = output.substring(0, index).trim();
       String host = output.substring(index+1).trim();
       String[] ipAndHost = new String[2];


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
index 98557ce..8e8546b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
@@ -21,19 +21,22 @@ package org.apache.slider.server.appmaster.state;
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricSet;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.RoleStatistics;
 import org.apache.slider.providers.PlacementPolicy;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.server.appmaster.management.BoolMetricPredicate;
-import org.apache.slider.server.appmaster.management.LongGauge;
+import org.apache.slider.server.appmaster.metrics.SliderMetrics;
 
 import java.io.Serializable;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
 /**
  * Models the ongoing status of all nodes in an application.
  *
@@ -42,7 +45,7 @@ import java.util.Map;
  * requires synchronization. Where synchronized access is good is that it allows for
  * the whole instance to be locked, for updating multiple entries.
  */
-public final class RoleStatus implements Cloneable, MetricSet {
+public final class RoleStatus implements MetricSet {
 
   private final String name;
   private final String group;
@@ -53,25 +56,9 @@ public final class RoleStatus implements Cloneable, MetricSet {
   private final int key;
   private final ProviderRole providerRole;
 
-  private final LongGauge actual = new LongGauge();
-  private final LongGauge completed = new LongGauge();
-  private final LongGauge desired = new LongGauge();
-  private final LongGauge failed = new LongGauge();
-  private final LongGauge failedRecently = new LongGauge(0);
-  private final LongGauge limitsExceeded = new LongGauge(0);
-  private final LongGauge nodeFailed = new LongGauge(0);
-  /** Number of AA requests queued. */
-  private final LongGauge pendingAntiAffineRequests = new LongGauge(0);
-  private final LongGauge preempted = new LongGauge(0);
-  private final LongGauge releasing = new LongGauge();
-  private final LongGauge requested = new LongGauge();
-  private final LongGauge started = new LongGauge();
-  private final LongGauge startFailed = new LongGauge();
-  private final LongGauge totalRequested = new LongGauge();
-
   /** resource requirements */
   private Resource resourceRequirements;
-
+  private SliderMetrics componentMetrics;
 
   /** any pending AA request */
   private volatile OutstandingRequest outstandingAArequest = null;
@@ -84,28 +71,19 @@ public final class RoleStatus implements Cloneable, MetricSet {
     this.name = providerRole.name;
     this.group = providerRole.group;
     this.key = providerRole.id;
+    componentMetrics =
+        SliderMetrics.register(this.name, "Metrics for component " + this.name);
+    componentMetrics
+        .tag("type", "Metrics type [component or service]", "component");
+  }
+
+  public SliderMetrics getComponentMetrics() {
+    return this.componentMetrics;
   }
 
   @Override
   public Map<String, Metric> getMetrics() {
     Map<String, Metric> metrics = new HashMap<>(15);
-    metrics.put("actual", actual);
-    metrics.put("completed", completed );
-    metrics.put("desired", desired);
-    metrics.put("failed", failed);
-    metrics.put("limitsExceeded", limitsExceeded);
-    metrics.put("nodeFailed", nodeFailed);
-    metrics.put("preempted", preempted);
-    metrics.put("pendingAntiAffineRequests", pendingAntiAffineRequests);
-    metrics.put("releasing", releasing);
-    metrics.put("requested", requested);
-    metrics.put("preempted", preempted);
-    metrics.put("releasing", releasing );
-    metrics.put("requested", requested);
-    metrics.put("started", started);
-    metrics.put("startFailed", startFailed);
-    metrics.put("totalRequested", totalRequested);
-
     metrics.put("outstandingAArequest",
       new BoolMetricPredicate(new BoolMetricPredicate.Eval() {
         @Override
@@ -174,83 +152,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
     return !hasPlacementPolicy(PlacementPolicy.ANYWHERE);
   }
 
-  public long getDesired() {
-    return desired.get();
-  }
-
-  public void setDesired(long desired) {
-    this.desired.set(desired);
-  }
-
-  public long getActual() {
-    return actual.get();
-  }
-
-  public long incActual() {
-    return actual.incrementAndGet();
-  }
-
-  public long decActual() {
-    return actual.decToFloor(1);
-  }
-
-  /**
-   * Get the request count.
-   * @return a count of requested containers
-   */
-  public long getRequested() {
-    return requested.get();
-  }
-
-  public long incRequested() {
-    totalRequested.incrementAndGet();
-    return requested.incrementAndGet();
-  }
-
-  public void cancel(long count) {
-    requested.decToFloor(count);
-  }
-
-  public void decRequested() {
-    cancel(1);
-  }
-
-  public long getReleasing() {
-    return releasing.get();
-  }
-
-  public long incReleasing() {
-    return releasing.incrementAndGet();
-  }
-
-  public long decReleasing() {
-    return releasing.decToFloor(1);
-  }
-
-  public long getFailed() {
-    return failed.get();
-  }
-
-  public long getFailedRecently() {
-    return failedRecently.get();
-  }
-
-  /**
-   * Reset the recent failure
-   * @return the number of failures in the "recent" window
-   */
-  public long resetFailedRecently() {
-    return failedRecently.getAndSet(0);
-  }
-
-  public long getLimitsExceeded() {
-    return limitsExceeded.get();
-  }
-
-  public long incPendingAntiAffineRequests(long v) {
-    return pendingAntiAffineRequests.addAndGet(v);
-  }
-
   /**
    * Probe for an outstanding AA request being true
    * @return true if there is an outstanding AA Request
@@ -271,119 +172,78 @@ public final class RoleStatus implements Cloneable, MetricSet {
    * Note that a role failed, text will
    * be used in any diagnostics if an exception
    * is later raised.
-   * @param startupFailure flag to indicate this was a startup event
    * @param text text about the failure
-   * @param outcome outcome of the container
    */
-  public synchronized void noteFailed(boolean startupFailure, String text,
-      ContainerOutcome outcome) {
+  public synchronized void noteFailed(String text) {
     if (text != null) {
       failureMessage = text;
     }
-    switch (outcome) {
-      case Preempted:
-        preempted.incrementAndGet();
-        break;
-
-      case Node_failure:
-        nodeFailed.incrementAndGet();
-        failed.incrementAndGet();
-        break;
-
-      case Failed_limits_exceeded: // exceeded memory or CPU; app/configuration related
-        limitsExceeded.incrementAndGet();
-        // fall through
-      case Failed: // application failure, possibly node related, possibly not
-      default: // anything else (future-proofing)
-        failed.incrementAndGet();
-        failedRecently.incrementAndGet();
-        //have a look to see if it short lived
-        if (startupFailure) {
-          incStartFailed();
-        }
-        break;
-    }
   }
 
-  public long getStartFailed() {
-    return startFailed.get();
-  }
-
-  public synchronized void incStartFailed() {
-    startFailed.getAndIncrement();
-  }
 
-  public synchronized String getFailureMessage() {
-    return failureMessage;
+  public void setOutstandingAArequest(OutstandingRequest outstandingAArequest) {
+    this.outstandingAArequest = outstandingAArequest;
   }
 
-  public long getCompleted() {
-    return completed.get();
+  /**
+   * Complete the outstanding AA request (there's no check for one in progress, caller
+   * expected to have done that).
+   */
+  public void completeOutstandingAARequest() {
+    setOutstandingAArequest(null);
   }
 
-  public long incCompleted() {
-    return completed.incrementAndGet();
-  }
-  public long getStarted() {
-    return started.get();
+  /**
+   * Cancel any outstanding AA request. Harmless if the role is non-AA, or
+   * if there are no outstanding requests.
+   */
+  public void cancelOutstandingAARequest() {
+    if (outstandingAArequest != null) {
+      setOutstandingAArequest(null);
+    }
   }
 
-  public synchronized void incStarted() {
-    started.incrementAndGet();
+  public long getDesired() {
+    return componentMetrics.containersDesired.value();
   }
 
-  public long getTotalRequested() {
-    return totalRequested.get();
+  long getRunning() {
+    return componentMetrics.containersRunning.value();
   }
 
-  public long getPreempted() {
-    return preempted.get();
+  public long getPending() {
+    return componentMetrics.containersPending.value();
   }
 
-  public long getNodeFailed() {
-    return nodeFailed.get();
+  public long getAAPending() {
+    return componentMetrics.pendingAAContainers.value();
   }
 
-  public long getPendingAntiAffineRequests() {
-    return pendingAntiAffineRequests.get();
+  void decAAPending() {
+    componentMetrics.pendingAAContainers.decr();
   }
-
-  public void setPendingAntiAffineRequests(long pendingAntiAffineRequests) {
-    this.pendingAntiAffineRequests.set(pendingAntiAffineRequests);
+  void setAAPending(long n) {
+    componentMetrics.pendingAAContainers.set((int)n);
   }
 
-  public long decPendingAntiAffineRequests() {
-    return pendingAntiAffineRequests.decToFloor(1);
+  long getFailedRecently() {
+    return componentMetrics.failedSinceLastThreshold.value();
   }
 
-  public OutstandingRequest getOutstandingAArequest() {
-    return outstandingAArequest;
+  long resetFailedRecently() {
+    long count =
+        componentMetrics.failedSinceLastThreshold.value();
+    componentMetrics.failedSinceLastThreshold.set(0);
+    return count;
   }
 
-  public void setOutstandingAArequest(OutstandingRequest outstandingAArequest) {
-    this.outstandingAArequest = outstandingAArequest;
+  long getFailed() {
+    return componentMetrics.containersFailed.value();
   }
 
-  /**
-   * Complete the outstanding AA request (there's no check for one in progress, caller
-   * expected to have done that).
-   */
-  public void completeOutstandingAARequest() {
-    setOutstandingAArequest(null);
-  }
-
-  /**
-   * Cancel any outstanding AA request. Harmless if the role is non-AA, or
-   * if there are no outstanding requests.
-   */
-  public void cancelOutstandingAARequest() {
-    if (outstandingAArequest != null) {
-      setOutstandingAArequest(null);
-      setPendingAntiAffineRequests(0);
-      decRequested();
-    }
+  String getFailureMessage() {
+    return this.failureMessage;
   }
-
   /**
    * Get the number of roles we are short of.
    * nodes released are ignored.
@@ -392,10 +252,9 @@ public final class RoleStatus implements Cloneable, MetricSet {
    */
   public long getDelta() {
     long inuse = getActualAndRequested();
-    long delta = desired.get() - inuse;
+    long delta = getDesired() - inuse;
     if (delta < 0) {
       //if we are releasing, remove the number that are already released.
-      delta += releasing.get();
       //but never switch to a positive
       delta = Math.min(delta, 0);
     }
@@ -407,43 +266,7 @@ public final class RoleStatus implements Cloneable, MetricSet {
    * @return the size of the application when outstanding requests are included.
    */
   public long getActualAndRequested() {
-    return actual.get() + requested.get();
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder sb = new StringBuilder("RoleStatus{");
-    sb.append("name='").append(name).append('\'');
-    sb.append(", group=").append(group);
-    sb.append(", key=").append(key);
-    sb.append(", desired=").append(desired);
-    sb.append(", actual=").append(actual);
-    sb.append(", requested=").append(requested);
-    sb.append(", releasing=").append(releasing);
-    sb.append(", failed=").append(failed);
-    sb.append(", startFailed=").append(startFailed);
-    sb.append(", started=").append(started);
-    sb.append(", completed=").append(completed);
-    sb.append(", totalRequested=").append(totalRequested);
-    sb.append(", preempted=").append(preempted);
-    sb.append(", nodeFailed=").append(nodeFailed);
-    sb.append(", failedRecently=").append(failedRecently);
-    sb.append(", limitsExceeded=").append(limitsExceeded);
-    sb.append(", resourceRequirements=").append(resourceRequirements);
-    sb.append(", isAntiAffinePlacement=").append(isAntiAffinePlacement());
-    if (isAntiAffinePlacement()) {
-      sb.append(", pendingAntiAffineRequests=").append(pendingAntiAffineRequests);
-      sb.append(", outstandingAArequest=").append(outstandingAArequest);
-    }
-    sb.append(", failureMessage='").append(failureMessage).append('\'');
-    sb.append(", providerRole=").append(providerRole);
-    sb.append('}');
-    return sb.toString();
-  }
-
-  @Override
-  public synchronized  Object clone() throws CloneNotSupportedException {
-    return super.clone();
+    return getRunning() + getPending();
   }
 
   /**
@@ -455,36 +278,12 @@ public final class RoleStatus implements Cloneable, MetricSet {
   }
 
   /**
-   * Build the statistics map from the current data
-   * @return a map for use in statistics reports
-   */
-  public Map<String, Integer> buildStatistics() {
-    ComponentInformation componentInformation = serialize();
-    return componentInformation.buildStatistics();
-  }
-
-  /**
    * Produced a serialized form which can be served up as JSON
    * @return a summary of the current role status.
    */
   public synchronized ComponentInformation serialize() {
     ComponentInformation info = new ComponentInformation();
     info.name = name;
-    info.priority = getPriority();
-    info.desired = desired.intValue();
-    info.actual = actual.intValue();
-    info.requested = requested.intValue();
-    info.releasing = releasing.intValue();
-    info.failed = failed.intValue();
-    info.startFailed = startFailed.intValue();
-    info.placementPolicy = getPlacementPolicy();
-    info.failureMessage = failureMessage;
-    info.totalRequested = totalRequested.intValue();
-    info.failedRecently = failedRecently.intValue();
-    info.nodeFailed = nodeFailed.intValue();
-    info.preempted = preempted.intValue();
-    info.pendingAntiAffineRequestCount = pendingAntiAffineRequests.intValue();
-    info.isAARequestOutstanding = isAARequestOutstanding();
     return info;
   }
 
@@ -542,17 +341,6 @@ public final class RoleStatus implements Cloneable, MetricSet {
   public synchronized RoleStatistics getStatistics() {
     RoleStatistics stats = new RoleStatistics();
     stats.activeAA = getOutstandingAARequestCount();
-    stats.actual = actual.get();
-    stats.desired = desired.get();
-    stats.failed = failed.get();
-    stats.limitsExceeded = limitsExceeded.get();
-    stats.nodeFailed = nodeFailed.get();
-    stats.preempted = preempted.get();
-    stats.releasing = releasing.get();
-    stats.requested = requested.get();
-    stats.started = started.get();
-    stats.startFailed = startFailed.get();
-    stats.totalRequested = totalRequested.get();
     return stats;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
index ad91183..118ca9d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
@@ -21,15 +21,13 @@ package org.apache.slider.server.appmaster.state;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.NodeInformation;
 import org.apache.slider.api.types.RoleStatistics;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.registry.docstore.PublishedConfigSet;
 import org.apache.slider.core.registry.docstore.PublishedExportsSet;
@@ -105,29 +103,7 @@ public interface StateAccessForProviders {
    * Get the current cluster description 
    * @return the actual state of the cluster
    */
-  ClusterDescription getClusterStatus();
-
-  /**
-   * Get at the snapshot of the resource config
-   * Changes here do not affect the application state.
-   * @return the most recent settings
-   */
-  ConfTreeOperations getResourcesSnapshot();
-
-  /**
-   * Get at the snapshot of the appconf config
-   * Changes here do not affect the application state.
-   * @return the most recent settings
-   */
-  ConfTreeOperations getAppConfSnapshot();
-
-  /**
-   * Get at the snapshot of the internals config.
-   * Changes here do not affect the application state.
-   * @return the internals settings
-   */
-
-  ConfTreeOperations getInternalsSnapshot();
+  Application getApplication();
 
   /**
    * Flag set to indicate the application is live -this only happens
@@ -135,22 +111,8 @@ public interface StateAccessForProviders {
    */
   boolean isApplicationLive();
 
-  long getSnapshotTime();
-
-  /**
-   * Get a snapshot of the entire aggregate configuration
-   * @return the aggregate configuration
-   */
-  AggregateConf getInstanceDefinitionSnapshot();
-
-  /**
-   * Get the desired/unresolved value
-   * @return unresolved
-   */
-  AggregateConf getUnresolvedInstanceDefinition();
-
   /**
-   * Look up a role from its key -or fail 
+   * Look up a role from its key -or fail
    *
    * @param key key to resolve
    * @return the status
@@ -159,7 +121,7 @@ public interface StateAccessForProviders {
   RoleStatus lookupRoleStatus(int key);
 
   /**
-   * Look up a role from its key -or fail 
+   * Look up a role from its key -or fail
    *
    * @param c container in a role
    * @return the status
@@ -232,14 +194,8 @@ public interface StateAccessForProviders {
   /**
    * Update the cluster description with anything interesting
    */
-  ClusterDescription refreshClusterStatus();
+  Application refreshClusterStatus();
 
-  /**
-   * Get a deep clone of the role status list. Concurrent events may mean this
-   * list (or indeed, some of the role status entries) may be inconsistent
-   * @return a snapshot of the role status entries
-   */
-  List<RoleStatus> cloneRoleStatusList();
 
   /**
    * get application liveness information
@@ -248,13 +204,6 @@ public interface StateAccessForProviders {
   ApplicationLivenessInformation getApplicationLivenessInformation();
 
   /**
-   * Get the live statistics map
-   * @return a map of statistics values, defined in the {@link StatusKeys}
-   * keylist.
-   */
-  Map<String, Integer> getLiveStatistics();
-
-  /**
    * Get a snapshot of component information.
    * <p>
    *   This does <i>not</i> include any container list, which 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
index 7ecc00c..0cac430 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/SliderAMWebApp.java
@@ -16,6 +16,8 @@
  */
 package org.apache.slider.server.appmaster.web;
 
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.health.HealthCheckRegistry;
 import com.codahale.metrics.servlets.HealthCheckServlet;
 import com.codahale.metrics.servlets.MetricsServlet;
 import com.codahale.metrics.servlets.PingServlet;
@@ -28,10 +30,8 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.apache.hadoop.yarn.webapp.Dispatcher;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebApp;
-import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.web.rest.AMWadlGeneratorConfig;
 import org.apache.slider.server.appmaster.web.rest.AMWebServices;
-import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
 import org.apache.slider.server.appmaster.web.rest.SliderJacksonJaxbJsonProvider;
 
 import java.util.HashMap;
@@ -39,6 +39,8 @@ import java.util.Map;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
+
 /**
  * 
  */
@@ -65,6 +67,7 @@ public class SliderAMWebApp extends WebApp {
     bind(GenericExceptionHandler.class);
     // bind the REST interface
     bind(AMWebServices.class);
+
     //bind(AMAgentWebServices.class);
     route("/", SliderAMController.class);
     route(CONTAINER_STATS, SliderAMController.class, "containerStats");
@@ -81,11 +84,9 @@ public class SliderAMWebApp extends WebApp {
       serve(path).with(Dispatcher.class);
     }
 
-    // metrics
-    MetricsAndMonitoring monitoring =
-        webAppApi.getMetricsAndMonitoring();
-    serve(SYSTEM_HEALTHCHECK).with(new HealthCheckServlet(monitoring.getHealth()));
-    serve(SYSTEM_METRICS).with(new MetricsServlet(monitoring.getMetrics()));
+    serve(SYSTEM_HEALTHCHECK)
+        .with(new HealthCheckServlet(new HealthCheckRegistry()));
+    serve(SYSTEM_METRICS).with(new MetricsServlet(new MetricRegistry()));
     serve(SYSTEM_PING).with(new PingServlet());
     serve(SYSTEM_THREADS).with(new ThreadDumpServlet());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
index ea07a8a..094726d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApi.java
@@ -22,7 +22,6 @@ import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.state.AppState;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
 
 /**
  * Interface to pass information from the Slider AppMaster to the WebApp
@@ -56,10 +55,4 @@ public interface WebAppApi {
    * @return the immediate and scheduled queues
    */
   QueueAccess getQueues();
-
-  /**
-   * Local cache of content
-   * @return the cache
-   */
-  ContentCache getContentCache();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
index d20f1ad..fd9381c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/WebAppApiImpl.java
@@ -18,11 +18,9 @@ package org.apache.slider.server.appmaster.web;
 
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.slider.providers.ProviderService;
-import org.apache.slider.server.appmaster.AppMasterActionOperations;
 import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,15 +37,10 @@ public class WebAppApiImpl implements WebAppApi {
   private final RegistryOperations registryOperations;
   private final MetricsAndMonitoring metricsAndMonitoring;
   private final QueueAccess queues;
-  private final AppMasterActionOperations appMasterOperations;
-  private final ContentCache contentCache;
 
   public WebAppApiImpl(StateAccessForProviders appState,
       ProviderService provider, RegistryOperations registryOperations,
-      MetricsAndMonitoring metricsAndMonitoring, QueueAccess queues,
-      AppMasterActionOperations appMasterOperations, ContentCache contentCache) {
-    this.appMasterOperations = appMasterOperations;
-    this.contentCache = contentCache;
+      MetricsAndMonitoring metricsAndMonitoring, QueueAccess queues) {
     checkNotNull(appState);
     checkNotNull(provider);
     this.queues = queues;
@@ -82,10 +75,4 @@ public class WebAppApiImpl implements WebAppApi {
   public QueueAccess getQueues() {
     return queues;
   }
-
-
-  @Override
-  public ContentCache getContentCache() {
-    return contentCache;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
index aed87d8..e73dd87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
@@ -18,13 +18,24 @@ package org.apache.slider.server.appmaster.web.rest;
 
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.server.appmaster.web.WebAppApi;
-import org.apache.slider.server.appmaster.web.rest.application.ApplicationResource;
+import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop;
+import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse;
 import org.apache.slider.server.appmaster.web.rest.management.ManagementResource;
 import org.apache.slider.server.appmaster.web.rest.publisher.PublisherResource;
 import org.apache.slider.server.appmaster.web.rest.registry.RegistryResource;
 
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
 import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.UriInfo;
+
+import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.ACTION_STOP;
 
 /**
  *  The available REST services exposed by a slider AM. 
@@ -38,7 +49,6 @@ public class AMWebServices {
   private final ManagementResource managementResource;
   private final PublisherResource publisherResource;
   private final RegistryResource registryResource;
-  private final ApplicationResource applicationResource;
 
   @Inject
   public AMWebServices(WebAppApi slider) {
@@ -46,7 +56,6 @@ public class AMWebServices {
     managementResource = new ManagementResource(slider);
     publisherResource = new PublisherResource(slider);
     registryResource = new RegistryResource(slider);
-    applicationResource = new ApplicationResource(slider);
   }
 
   @Path(RestPaths.SLIDER_SUBPATH_MANAGEMENT)
@@ -63,9 +72,21 @@ public class AMWebServices {
   public RegistryResource getRegistryResource() {
     return registryResource;
   }
-  
+
+
+  @GET
   @Path(RestPaths.SLIDER_SUBPATH_APPLICATION)
-  public ApplicationResource getApplicationResource() {
-    return applicationResource;
+  @Produces({APPLICATION_JSON})
+  public Application getApplicationResource() {
+    return slider.getAppState().getApplication();
+  }
+
+  @POST
+  @Path(ACTION_STOP)
+  @Produces({APPLICATION_JSON})
+  public StopResponse actionStop(@Context HttpServletRequest request,
+      @Context UriInfo uriInfo,
+      String body) {
+    return new RestActionStop(slider).stop(request, uriInfo, body);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
index ae9eb0f..581f5b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/RestPaths.java
@@ -34,11 +34,8 @@ public class RestPaths {
   /**
    * agent content root: {@value}
    */
-  public static final String WS_AGENT_CONTEXT_ROOT = "/" + AGENT_WS_CONTEXT;
-  public static final String V1_SLIDER = "/v1/slider";
-  public static final String SLIDER_CONTEXT_ROOT = WS_CONTEXT_ROOT + V1_SLIDER;
-  public static final String RELATIVE_API = WS_CONTEXT + V1_SLIDER;
-  public static final String SLIDER_AGENT_CONTEXT_ROOT = WS_AGENT_CONTEXT_ROOT + V1_SLIDER;
+  public static final String SLIDER_CONTEXT_ROOT = WS_CONTEXT_ROOT + "/v1";
+  public static final String RELATIVE_API = WS_CONTEXT + "/v1";
   public static final String MANAGEMENT = "mgmt";
   public static final String SLIDER_SUBPATH_MANAGEMENT = "/" + MANAGEMENT;
   public static final String SLIDER_SUBPATH_AGENTS = "/agents";
@@ -46,21 +43,6 @@ public class RestPaths {
 
 
   /**
-   * management path: {@value}
-   */
-  public static final String SLIDER_PATH_MANAGEMENT = SLIDER_CONTEXT_ROOT
-                                      + SLIDER_SUBPATH_MANAGEMENT;
-
-  public static final String RELATIVE_PATH_MANAGEMENT = RELATIVE_API
-                                      + SLIDER_SUBPATH_MANAGEMENT;
-
-  /**
-   * Agents: {@value}
-   */
-  public static final String SLIDER_PATH_AGENTS = SLIDER_AGENT_CONTEXT_ROOT
-                                      + SLIDER_SUBPATH_AGENTS;
-  
-  /**
    * Publisher: {@value}
    */
   public static final String SLIDER_PATH_PUBLISHER = SLIDER_CONTEXT_ROOT
@@ -105,6 +87,7 @@ public class RestPaths {
   public static final String SYSTEM = "/system";
 
 
+
   /**
    * Codahale Metrics - health: {@value}
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResouceContentCacheFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResouceContentCacheFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResouceContentCacheFactory.java
index 2facf16..d23fcee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResouceContentCacheFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResouceContentCacheFactory.java
@@ -19,17 +19,7 @@
 package org.apache.slider.server.appmaster.web.rest.application;
 
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.appmaster.web.rest.application.resources.AggregateModelRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.AppconfRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.CachedContent;
 import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
-import org.apache.slider.server.appmaster.web.rest.application.resources.LiveComponentsRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.LiveContainersRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.LiveNodesRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.LiveResourcesRefresher;
-import org.apache.slider.server.appmaster.web.rest.application.resources.LiveStatisticsRefresher;
-
-import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
 
 public class ApplicationResouceContentCacheFactory {
   public static final int LIFESPAN = 500;
@@ -41,23 +31,6 @@ public class ApplicationResouceContentCacheFactory {
   public static ContentCache createContentCache(
       StateAccessForProviders state) {
     ContentCache cache = new ContentCache();
-    cache.put(LIVE_RESOURCES, new CachedContent<>(LIFESPAN, new LiveResourcesRefresher(state)));
-    cache.put(LIVE_CONTAINERS, new CachedContent<>(LIFESPAN, new LiveContainersRefresher(state)));
-    cache.put(LIVE_COMPONENTS, new CachedContent<>(LIFESPAN, new LiveComponentsRefresher(state)));
-    cache.put(LIVE_NODES, new CachedContent<>(LIFESPAN, new LiveNodesRefresher(state)));
-    cache.put(MODEL_DESIRED,
-        new CachedContent<>(LIFESPAN, new AggregateModelRefresher(state, false)));
-    cache.put(MODEL_RESOLVED,
-        new CachedContent<>(LIFESPAN, new AggregateModelRefresher(state, true)));
-    cache.put(MODEL_RESOLVED_APPCONF,
-        new CachedContent<>(LIFESPAN, new AppconfRefresher(state, false, false)));
-    cache.put(MODEL_RESOLVED_RESOURCES,
-        new CachedContent<>(LIFESPAN, new AppconfRefresher(state, false, true)));
-    cache.put(MODEL_DESIRED_APPCONF,
-        new CachedContent<>(LIFESPAN, new AppconfRefresher(state, true, false)));
-    cache.put(MODEL_DESIRED_RESOURCES,
-        new CachedContent<>(LIFESPAN, new AppconfRefresher(state, true, true)));
-    cache.put(LIVE_STATISTICS, new CachedContent<>(LIFESPAN, new LiveStatisticsRefresher(state)));
     return cache;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResource.java
deleted file mode 100644
index 52068d6..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/ApplicationResource.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.api.types.ContainerInformation;
-import org.apache.slider.api.types.NodeInformation;
-import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.exceptions.NoSuchNodeException;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
-import org.apache.slider.server.appmaster.actions.ActionFlexCluster;
-import org.apache.slider.server.appmaster.actions.AsyncAction;
-import org.apache.slider.server.appmaster.actions.QueueAccess;
-import org.apache.slider.server.appmaster.state.RoleInstance;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-import org.apache.slider.server.appmaster.web.WebAppApi;
-import org.apache.slider.server.appmaster.web.rest.AbstractSliderResource;
-import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
-
-import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop;
-import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse;
-import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
-import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionPing;
-import org.apache.slider.api.types.PingInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.inject.Singleton;
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.HEAD;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-
-import static javax.ws.rs.core.MediaType.*;
-
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-@Singleton
-@SuppressWarnings("unchecked")
-public class ApplicationResource extends AbstractSliderResource {
-  private static final Logger log =
-      LoggerFactory.getLogger(ApplicationResource.class);
-
-  public static final List<String> LIVE_ENTRIES = toJsonList("resources",
-      "containers",
-      "components",
-      "nodes",
-      "statistics",
-      "internal");
-
-  public static final List<String> ROOT_ENTRIES =
-      toJsonList("model", "live", "actions");
-
-  public static final List<String> MODEL_ENTRIES =
-      toJsonList("desired", "resolved");
-
-  /**
-   * This is the cache of all content ... each entry is
-   * designed to be self-refreshing on get operations, 
-   * so is never very out of date, yet many GETs don't
-   * overload the rest of the system.
-   */
-  private final ContentCache cache;
-  private final StateAccessForProviders state;
-  private final QueueAccess actionQueues;
-
-  public ApplicationResource(WebAppApi slider) {
-    super(slider);
-    state = slider.getAppState();
-    cache = slider.getContentCache();
-    actionQueues = slider.getQueues();
-  }
-
-  /**
-   * Build a new JSON-marshallable list of string elements
-   * @param elements elements
-   * @return something that can be returned
-   */
-  private static List<String> toJsonList(String... elements) {
-    return Lists.newArrayList(elements);
-  }
-
-  @GET
-  @Path("/")
-  @Produces({APPLICATION_JSON})
-  public List<String> getRoot() {
-    markGet(SLIDER_SUBPATH_APPLICATION);
-    return ROOT_ENTRIES;
-  }
-
-  /**
-   * Enum model values: desired and resolved
-   * @return the desired and resolved model
-   */
-  @GET
-  @Path(MODEL)
-  @Produces({APPLICATION_JSON})
-  public List<String> getModel() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL);
-    return MODEL_ENTRIES;
-  }
-
-  @GET
-  @Path(MODEL_DESIRED)
-  @Produces({APPLICATION_JSON})
-  public AggregateConf getModelDesired() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED);
-    return lookupAggregateConf(MODEL_DESIRED);
-  }
-  
-  @GET
-  @Path(MODEL_DESIRED_APPCONF)
-  @Produces({APPLICATION_JSON})
-  public ConfTree getModelDesiredAppconf() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_APPCONF);
-    return lookupConfTree(MODEL_DESIRED_APPCONF);
-  }
-
-  @GET
-  @Path(MODEL_DESIRED_RESOURCES)
-  @Produces({APPLICATION_JSON})
-  public ConfTree getModelDesiredResources() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_RESOURCES);
-    return lookupConfTree(MODEL_DESIRED_RESOURCES);
-  }
-
-/*
-  @PUT
-  @Path(MODEL_DESIRED_RESOURCES)
-//  @Consumes({APPLICATION_JSON, TEXT_PLAIN})
-  @Consumes({TEXT_PLAIN})
-  @Produces({APPLICATION_JSON})
-*/
-  public ConfTree setModelDesiredResources(
-      String json) {
-    markPut(SLIDER_SUBPATH_APPLICATION, MODEL_DESIRED_RESOURCES);
-    int size = json != null ? json.length() : 0;
-    log.info("PUT {} {} bytes:\n{}", MODEL_DESIRED_RESOURCES,
-        size,
-        json);
-    if (size == 0) {
-      log.warn("No JSON in PUT request; rejecting");
-      throw new BadRequestException("No JSON in PUT");
-    }
-    
-    try {
-      ConfTreeSerDeser serDeser = new ConfTreeSerDeser();
-      ConfTree updated = serDeser.fromJson(json);
-      queue(new ActionFlexCluster("flex",
-          1, TimeUnit.MILLISECONDS,
-          updated));
-      // return the updated value, even though it potentially hasn't yet
-      // been executed
-      return updated;
-    } catch (Exception e) {
-      throw buildException("PUT to "+ MODEL_DESIRED_RESOURCES , e);
-    }
-  }
-  @PUT
-  @Path(MODEL_DESIRED_RESOURCES)
-  @Consumes({APPLICATION_JSON})
-  @Produces({APPLICATION_JSON})
-  public ConfTree setModelDesiredResources(
-      ConfTree updated) {
-    try {
-      queue(new ActionFlexCluster("flex",
-          1, TimeUnit.MILLISECONDS,
-          updated));
-      // return the updated value, even though it potentially hasn't yet
-      // been executed
-      return updated;
-    } catch (Exception e) {
-      throw buildException("PUT to "+ MODEL_DESIRED_RESOURCES , e);
-    }
-  }
-  
-  
-
-  @GET
-  @Path(MODEL_RESOLVED)
-  @Produces({APPLICATION_JSON})
-  public AggregateConf getModelResolved() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED);
-    return lookupAggregateConf(MODEL_RESOLVED);
-  }
-
-  @GET
-  @Path(MODEL_RESOLVED_APPCONF)
-  @Produces({APPLICATION_JSON})
-  public ConfTree getModelResolvedAppconf() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED_APPCONF);
-    return lookupConfTree(MODEL_RESOLVED_APPCONF);
-  }
-
-  @GET
-  @Path(MODEL_RESOLVED_RESOURCES)
-  @Produces({APPLICATION_JSON})
-  public ConfTree getModelResolvedResources() {
-    markGet(SLIDER_SUBPATH_APPLICATION, MODEL_RESOLVED_RESOURCES);
-    return lookupConfTree(MODEL_RESOLVED_RESOURCES);
-  }
-  
-  @GET
-  @Path(LIVE)
-  @Produces({APPLICATION_JSON})
-  public List<String> getLive() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE);
-    return LIVE_ENTRIES;
-  }
-
-  @GET
-  @Path(LIVE_RESOURCES)
-  @Produces({APPLICATION_JSON})
-  public ConfTree getLiveResources() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_RESOURCES);
-    return lookupConfTree(LIVE_RESOURCES);
-  }
-  
-  @GET
-  @Path(LIVE_CONTAINERS)
-  @Produces({APPLICATION_JSON})
-  public Map<String, ContainerInformation> getLiveContainers() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_CONTAINERS);
-    try {
-      return (Map<String, ContainerInformation>)cache.lookup(
-          LIVE_CONTAINERS);
-    } catch (Exception e) {
-      throw buildException(LIVE_CONTAINERS, e);
-    }
-  }
-
-  @GET
-  @Path(LIVE_CONTAINERS + "/{containerId}")
-  @Produces({APPLICATION_JSON})
-  public ContainerInformation getLiveContainer(
-      @PathParam("containerId") String containerId) {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_CONTAINERS);
-    try {
-      RoleInstance id = state.getLiveInstanceByContainerID(containerId);
-      return id.serialize();
-    } catch (NoSuchNodeException e) {
-      throw new NotFoundException("Unknown container: " + containerId);
-    } catch (Exception e) {
-      throw buildException(LIVE_CONTAINERS + "/"+ containerId, e);
-    }
-  }
-
-  @GET
-  @Path(LIVE_COMPONENTS)
-  @Produces({APPLICATION_JSON})
-  public Map<String, ComponentInformation> getLiveComponents() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
-    try {
-      return (Map<String, ComponentInformation>) cache.lookup(LIVE_COMPONENTS);
-    } catch (Exception e) {
-      throw buildException(LIVE_COMPONENTS, e);
-    }
-  }
-  
-  @GET
-  @Path(LIVE_COMPONENTS + "/{component}")
-  @Produces({APPLICATION_JSON})
-  public ComponentInformation getLiveComponent(
-      @PathParam("component") String component) {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
-    try {
-      return state.getComponentInformation(component);
-    } catch (YarnRuntimeException e) {
-      throw new NotFoundException("Unknown component: " + component);
-    } catch (Exception e) {
-      throw buildException(LIVE_CONTAINERS +"/" + component, e);
-    }
-  }
-
-  /**
-   * Liveness information for the application as a whole
-   * @return snapshot of liveness
-   */
-  @GET
-  @Path(LIVE_LIVENESS)
-  @Produces({APPLICATION_JSON})
-  public ApplicationLivenessInformation getLivenessInformation() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_LIVENESS);
-    try {
-      return state.getApplicationLivenessInformation();
-    } catch (Exception e) {
-      throw buildException(LIVE_CONTAINERS, e);
-    }
-  }
-
-/*
-TODO: decide what structure to return here, then implement
-
-  @GET
-  @Path(LIVE_LIVENESS + "/{component}")
-  @Produces({APPLICATION_JSON})
-  public ApplicationLivenessInformation getLivenessForComponent(
-      @PathParam("component") String component) {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
-    try {
-      RoleStatus roleStatus = state.lookupRoleStatus(component);
-      ApplicationLivenessInformation info = new ApplicationLivenessInformation();
-      info.requested = roleStatus.getRequested();
-      info.allRequestsSatisfied = info.requested == 0;
-      return info;
-    } catch (YarnRuntimeException e) {
-      throw new NotFoundException("Unknown component: " + component);
-    } catch (Exception e) {
-      throw buildException(LIVE_LIVENESS + "/" + component, e);
-    }
-  }
-*/
-
-
-  @GET
-  @Path(LIVE_NODES)
-  @Produces({APPLICATION_JSON})
-  public NodeInformationList getLiveNodes() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
-    try {
-      return (NodeInformationList) cache.lookup(LIVE_NODES);
-    } catch (Exception e) {
-      throw buildException(LIVE_COMPONENTS, e);
-    }
-  }
-
-  @GET
-  @Path(LIVE_NODES + "/{hostname}")
-  @Produces({APPLICATION_JSON})
-  public NodeInformation getLiveNode(@PathParam("hostname") String hostname) {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_COMPONENTS);
-    try {
-      NodeInformation ni = state.getNodeInformation(hostname);
-      if (ni != null) {
-        return ni;
-      } else {
-        throw new NotFoundException("Unknown node: " + hostname);
-      }
-    } catch (NotFoundException e) {
-      throw e;
-    } catch (Exception e) {
-      throw buildException(LIVE_COMPONENTS + "/" + hostname, e);
-    }
-  }
-
-  /**
-   * Statistics of the application
-   * @return snapshot statistics
-   */
-  @GET
-  @Path(LIVE_STATISTICS)
-  @Produces({APPLICATION_JSON})
-  public Map<String, Integer> getLiveStatistics() {
-    markGet(SLIDER_SUBPATH_APPLICATION, LIVE_LIVENESS);
-    try {
-      return (Map<String, Integer>) cache.lookup(LIVE_STATISTICS);
-    } catch (Exception e) {
-      throw buildException(LIVE_STATISTICS, e);
-    }
-  }
-
-  /**
-   * Helper method; look up an aggregate configuration in the cache from
-   * a key, or raise an exception
-   * @param key key to resolve
-   * @return the configuration
-   * @throws WebApplicationException on a failure
-   */
-  protected AggregateConf lookupAggregateConf(String key) {
-    try {
-      return (AggregateConf) cache.lookup(key);
-    } catch (Exception e) {
-      throw buildException(key, e);
-    }
-  }
-
-
-  /**
-   * Helper method; look up an conf tree in the cache from
-   * a key, or raise an exception
-   * @param key key to resolve
-   * @return the configuration
-   * @throws WebApplicationException on a failure
-   */
-  protected ConfTree lookupConfTree(String key) {
-    try {
-      return (ConfTree) cache.lookup(key);
-    } catch (Exception e) {
-      throw buildException(key, e);
-    }
-  }
-
-  /* ************************************************************************
-  
-  ACTION PING
-  
-  **************************************************************************/
-  
-  @GET
-  @Path(ACTION_PING)
-  @Produces({APPLICATION_JSON})
-  public PingInformation actionPingGet(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo) {
-    markGet(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
-    return new RestActionPing().ping(request, uriInfo, "");
-  }
-  
-  @POST
-  @Path(ACTION_PING)
-  @Produces({APPLICATION_JSON})
-  public PingInformation actionPingPost(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo,
-      String body) {
-    markPost(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
-    return new RestActionPing().ping(request, uriInfo, body);
-  }
-  
-  @PUT
-  @Path(ACTION_PING)
-  @Consumes({TEXT_PLAIN})
-  @Produces({APPLICATION_JSON})
-  public PingInformation actionPingPut(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo,
-      String body) {
-    markPut(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
-    return new RestActionPing().ping(request, uriInfo, body);
-  }
-  
-  @DELETE
-  @Path(ACTION_PING)
-  @Consumes({APPLICATION_JSON})
-  @Produces({APPLICATION_JSON})
-  public PingInformation actionPingDelete(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo) {
-    markDelete(SLIDER_SUBPATH_APPLICATION, ACTION_PING);
-    return new RestActionPing().ping(request, uriInfo, "");
-  }
-  
-  @HEAD
-  @Path(ACTION_PING)
-  public Object actionPingHead(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo) {
-    mark("HEAD", SLIDER_SUBPATH_APPLICATION, ACTION_PING);
-    return new RestActionPing().ping(request, uriInfo, "");
-  }
-  
-  /* ************************************************************************
-  
-  ACTION STOP
-  
-  **************************************************************************/
-
-
-  @POST
-  @Path(ACTION_STOP)
-  @Produces({APPLICATION_JSON})
-  public StopResponse actionStop(@Context HttpServletRequest request,
-      @Context UriInfo uriInfo,
-      String body) {
-    markPost(SLIDER_SUBPATH_APPLICATION, ACTION_STOP);
-    return new RestActionStop(slider).stop(request, uriInfo, body);
-  }
-
-  /**
-   * Schedule an action
-   * @param action for delayed execution
-   */
-  public void schedule(AsyncAction action) {
-    actionQueues.schedule(action);
-  }
-
-  /**
-   * Put an action on the immediate queue -to be executed when the queue
-   * reaches it.
-   * @param action action to queue
-   */
-  public void queue(AsyncAction action) {
-    actionQueues.put(action);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
index ee28abf..261e66e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
@@ -23,7 +23,6 @@ import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 
 /**
  * Refresh the aggregate desired model via
- * {@link StateAccessForProviders#getInstanceDefinitionSnapshot()}
  */
 public class AggregateModelRefresher
     implements ResourceRefresher<AggregateConf> {
@@ -39,9 +38,6 @@ public class AggregateModelRefresher
 
   @Override
   public AggregateConf refresh() throws Exception {
-    return
-        resolved ?
-          state.getInstanceDefinitionSnapshot()
-          : state.getUnresolvedInstanceDefinition();
+    return new AggregateConf();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
index 06460cc..190a51e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
@@ -44,10 +44,7 @@ public class AppconfRefresher
 
   @Override
   public ConfTree refresh() throws Exception {
-    AggregateConf aggregateConf =
-        unresolved ?
-        state.getUnresolvedInstanceDefinition():
-        state.getInstanceDefinitionSnapshot();
+    AggregateConf aggregateConf = new AggregateConf();
     ConfTree ct = resources ? aggregateConf.getResources() 
                             : aggregateConf.getAppConf();
     return new ConfTreeSerDeser().fromInstance(ct);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveResourcesRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveResourcesRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveResourcesRefresher.java
deleted file mode 100644
index f988297..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveResourcesRefresher.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.api.StatusKeys;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.server.appmaster.state.RoleStatus;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-import java.util.Map;
-
-public class LiveResourcesRefresher implements ResourceRefresher<ConfTree> {
-
-  private final StateAccessForProviders state;
-
-  public LiveResourcesRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public ConfTree refresh() throws Exception {
-
-    // snapshot resources
-    ConfTreeOperations resources = state.getResourcesSnapshot();
-    // then add actual values
-    Map<Integer, RoleStatus> roleStatusMap = state.getRoleStatusMap();
-    
-    for (RoleStatus status : roleStatusMap.values()) {
-      String name = status.getName();
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_REQUESTING,
-          status.getRequested());
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_ACTUAL,
-          status.getActual());
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_RELEASING,
-          status.getReleasing());
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_FAILED,
-          status.getFailed());
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_COMPLETED,
-          status.getCompleted());
-      resources.setComponentOpt(name,
-          StatusKeys.COMPONENT_INSTANCES_STARTED,
-          status.getStarted());
-    }
-    return resources.getConfTree();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveStatisticsRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveStatisticsRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveStatisticsRefresher.java
deleted file mode 100644
index d31b455..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveStatisticsRefresher.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-import java.util.Map;
-
-public class LiveStatisticsRefresher implements ResourceRefresher<Map<String,Integer>> {
-
-  private final StateAccessForProviders state;
-
-  public LiveStatisticsRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public Map<String, Integer> refresh() throws Exception {
-
-    // snapshot resources
-    return state.getLiveStatistics();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/ResourceSnapshotRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/ResourceSnapshotRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/ResourceSnapshotRefresher.java
deleted file mode 100644
index c16912a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/ResourceSnapshotRefresher.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-public class ResourceSnapshotRefresher implements ResourceRefresher<ConfTree> {
-
-  private final StateAccessForProviders state;
-
-  public ResourceSnapshotRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public ConfTree refresh() throws Exception {
-
-    // snapshot resources
-    ConfTreeOperations resources = state.getResourcesSnapshot();
-      return resources.getConfTree();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
index f27711a..14d9400 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/ManagementResource.java
@@ -88,6 +88,7 @@ public class ManagementResource extends AbstractSliderResource {
   }
 
   protected AggregateConf getAggregateConf() {
-    return slider.getAppState().getInstanceDefinitionSnapshot();
+    //TODO
+    return  new AggregateConf();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
index 2f02f27..79b687f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ClusterSpecificationBlock.java
@@ -49,7 +49,7 @@ public class ClusterSpecificationBlock extends SliderHamletBlock {
    * @return
    */
   private String getJson() {
-    return appState.getClusterStatus().toString();
+    return appState.getApplication().toString();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
index 8b7d695..4796d6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/ContainerStatsBlock.java
@@ -26,8 +26,8 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.web.WebAppApi;
@@ -141,18 +141,10 @@ public class ContainerStatsBlock extends SliderHamletBlock {
 
           }));
 
-      ClusterDescription desc = appState.getClusterStatus();
-      Map<String, String> options = desc.getRole(name);
+      Application application = appState.getApplication();
       Iterable<Entry<TableContent, String>> tableContent;
-      
-      // Generate the pairs of data in the expected form
-      if (null != options) {
-        tableContent = Iterables.transform(options.entrySet(), stringStringPairFunc);
-      } else {
-        // Or catch that we have no options and provide "empty"
-        tableContent = Collections.emptySet();
-      }
-      
+      tableContent = Collections.emptySet();
+
       // Generate the options used by this role
       generateRoleDetails(div, "role-options-wrap", "Role Options", tableContent);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
index 2f99b27..440094e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
@@ -22,15 +22,12 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
-import org.apache.slider.api.ClusterDescription;
-import org.apache.slider.api.StatusKeys;
 import org.apache.slider.api.types.ApplicationLivenessInformation;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.core.registry.docstore.ExportEntry;
 import org.apache.slider.core.registry.docstore.PublishedExports;
 import org.apache.slider.core.registry.docstore.PublishedExportsSet;
-import org.apache.slider.providers.MonitorDetail;
-import org.apache.slider.providers.ProviderService;
+import org.apache.slider.server.appmaster.metrics.SliderMetrics;
 import org.apache.slider.server.appmaster.state.RoleStatus;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.slf4j.Logger;
@@ -39,7 +36,6 @@ import org.slf4j.LoggerFactory;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
@@ -71,8 +67,7 @@ public class IndexBlock extends SliderHamletBlock {
   // An extra method to make testing easier since you can't make an instance of Block
   @VisibleForTesting
   protected void doIndex(Hamlet html, String providerName) {
-    ClusterDescription clusterStatus = appState.getClusterStatus();
-    String name = clusterStatus.name;
+    String name = appState.getApplicationName();
     if (name != null && (name.startsWith(" ") || name.endsWith(" "))) {
       name = "'" + name + "'";
     } 
@@ -96,23 +91,23 @@ public class IndexBlock extends SliderHamletBlock {
           ._();
     table1.tr()
           .td("Create time: ")
-          .td(getInfoAvoidingNulls(StatusKeys.INFO_CREATE_TIME_HUMAN))
+          .td("N/A")
           ._();
     table1.tr()
           .td("Running since: ")
-          .td(getInfoAvoidingNulls(StatusKeys.INFO_LIVE_TIME_HUMAN))
+          .td("N/A")
           ._();
     table1.tr()
           .td("Time last flexed: ")
-          .td(getInfoAvoidingNulls(StatusKeys.INFO_FLEX_TIME_HUMAN))
+          .td("N/A")
           ._();
     table1.tr()
           .td("Application storage path: ")
-          .td(clusterStatus.dataPath)
+          .td("N/A")
           ._();
     table1.tr()
           .td("Application configuration path: ")
-          .td(clusterStatus.originConfigurationPath)
+          .td("N/A")
           ._();
     table1._();
     div._();
@@ -136,7 +131,8 @@ public class IndexBlock extends SliderHamletBlock {
     trb(header, "Placement");
     header._()._();  // tr & thead
 
-    List<RoleStatus> roleStatuses = appState.cloneRoleStatusList();
+    List<RoleStatus> roleStatuses =
+        new ArrayList<>(appState.getRoleStatusMap().values());
     Collections.sort(roleStatuses, new RoleStatus.CompareByName());
     for (RoleStatus status : roleStatuses) {
       String roleName = status.getName();
@@ -144,7 +140,7 @@ public class IndexBlock extends SliderHamletBlock {
       String aatext;
       if (status.isAntiAffinePlacement()) {
         boolean aaRequestOutstanding = status.isAARequestOutstanding();
-        int pending = (int)status.getPendingAntiAffineRequests();
+        int pending = (int)status.getAAPending();
         aatext = buildAADetails(aaRequestOutstanding, pending);
         if (SliderUtils.isSet(status.getLabelExpression())) {
           aatext += " (label: " + status.getLabelExpression() + ")";
@@ -160,17 +156,17 @@ public class IndexBlock extends SliderHamletBlock {
         } else {
           aatext = "";
         }
-        if (status.getRequested() > 0) {
+        if (status.getPending() > 0) {
           roleWithOpenRequest ++;
         }
       }
+      SliderMetrics metrics = status.getComponentMetrics();
       table.tr()
         .td().a(nameUrl, roleName)._()
-        .td(String.format("%d", status.getDesired()))
-        .td(String.format("%d", status.getActual()))
-        .td(String.format("%d", status.getRequested()))
-        .td(String.format("%d", status.getFailed()))
-        .td(String.format("%d", status.getStartFailed()))
+        .td(String.format("%d", metrics.containersDesired.value()))
+        .td(String.format("%d", metrics.containersRunning.value()))
+        .td(String.format("%d", metrics.containersPending.value()))
+        .td(String.format("%d", metrics.containersFailed.value()))
         .td(aatext)
         ._();
     }
@@ -218,7 +214,7 @@ public class IndexBlock extends SliderHamletBlock {
     DIV<Hamlet> provider_info = html.div("provider_info");
     provider_info.h3(providerName + " information");
     UL<Hamlet> ul = html.ul();
-    addProviderServiceOptions(providerService, ul, clusterStatus);
+    //TODO render app/cluster status
     ul._();
     provider_info._();
 
@@ -250,40 +246,9 @@ public class IndexBlock extends SliderHamletBlock {
   }
 
   private String getProviderName() {
-    return providerService.getHumanName();
+    return "docker";
   }
 
-  private String getInfoAvoidingNulls(String key) {
-    String createTime = appState.getClusterStatus().getInfo(key);
-
-    return null == createTime ? "N/A" : createTime;
-  }
-
-  protected void addProviderServiceOptions(ProviderService provider,
-      UL ul, ClusterDescription clusterStatus) {
-    Map<String, MonitorDetail> details = provider.buildMonitorDetails(
-        clusterStatus);
-    if (null == details) {
-      return;
-    }
-    // Loop over each entry, placing the text in the UL, adding an anchor when the URL is non-null/empty
-    for (Entry<String, MonitorDetail> entry : details.entrySet()) {
-      MonitorDetail detail = entry.getValue();
-      if (SliderUtils.isSet(detail.getValue()) ) {
-        LI item = ul.li();
-        item.span().$class("bold")._(entry.getKey())._();
-        item._(" - ");
-        if (detail.isUrl()) {
-          // Render an anchor if the value is a URL
-          item.a(detail.getValue(), detail.getValue())._();
-        } else {
-          item._(detail.getValue())._();
-        }
-      } else {
-        ul.li(entry.getKey());
-      }
-    }
-  }
 
   protected void enumeratePublishedExports(PublishedExportsSet exports, UL<Hamlet> ul) {
     for(String key : exports.keys()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/YarnApplicationProbe.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/YarnApplicationProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/YarnApplicationProbe.java
deleted file mode 100644
index 92df048..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/YarnApplicationProbe.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.servicemonitor;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.slider.client.SliderYarnClientImpl;
-import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Probe for YARN application
- */
-public class YarnApplicationProbe extends Probe {
-  protected static final Logger log = LoggerFactory.getLogger(
-    YarnApplicationProbe.class);
-
-  /**
-   * Yarn client service
-   */
-  private SliderYarnClientImpl yarnClient;
-  private final String clustername;
-  private final String username;
-
-  public YarnApplicationProbe(String clustername,
-                              SliderYarnClientImpl yarnClient,
-                              String name,
-                              Configuration conf, String username)
-      throws IOException {
-    super("Port probe " + name + " " + clustername,
-          conf);
-    this.clustername = clustername;
-    this.yarnClient = yarnClient;
-    this.username = username;
-  }
-
-
-  @Override
-  public void init() throws IOException {
-   
-    log.info("Checking " + clustername );
-  }
-
-  /**
-   * Try to connect to the (host,port); a failure to connect within
-   * the specified timeout is a failure
-   * @param livePing is the ping live: true for live; false for boot time
-   * @return the outcome
-   */
-  @Override
-  public ProbeStatus ping(boolean livePing) {
-    ProbeStatus status = new ProbeStatus();
-    try {
-      List<ApplicationReport> instances = yarnClient
-          .listDeployedInstances(username, null, clustername);
-      ApplicationReport instance = yarnClient
-          .findClusterInInstanceList(instances, clustername);
-      if (null == instance) {
-        throw UnknownApplicationInstanceException.unknownInstance(clustername);
-      }
-      status.succeed(this);
-    } catch (Exception e) {
-      status.fail(this, e);
-    }
-    return status;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
index 254bf27..6defa2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java
@@ -210,8 +210,6 @@ public class YarnRegistryViewForProviders {
 
   /**
    * Add a service under a path for the current user
-   * @param serviceClass service class to use under ~user
-   * @param serviceName name of the service
    * @param record service record
    * @param deleteTreeFirst perform recursive delete of the path first
    * @return the path the service was created at


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: YARN-6588. Add native-service AM log4j properties in classpath. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6588. Add native-service AM log4j properties in classpath. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7acc577c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7acc577c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7acc577c

Branch: refs/heads/yarn-native-services
Commit: 7acc577ccba83845dca9ecea70afbb9ebad2f0f6
Parents: 8ad5432
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 12 08:50:46 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/slider/common/tools/SliderUtils.java   | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7acc577c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 80b70b9..bc8e139 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -1642,6 +1642,7 @@ public final class SliderUtils {
       boolean usingMiniMRCluster) {
 
     ClasspathConstructor classpath = new ClasspathConstructor();
+    classpath.append(SliderKeys.LOG4J_SERVER_PROP_FILENAME);
 
     // add the runtime classpath needed for tests to work
     if (usingMiniMRCluster) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
new file mode 100644
index 0000000..b0634bf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test that app state is rebuilt on a restart.
+ */
+public class TestMockAppStateRebuildOnAMRestart extends BaseMockAppStateTest
+    implements MockRoles {
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateRebuildOnAMRestart";
+  }
+
+  @Test
+  public void testRebuild() throws Throwable {
+
+    int r0 = 1;
+    int r1 = 2;
+    int r2 = 3;
+    getRole0Status().setDesired(r0);
+    getRole1Status().setDesired(r1);
+    getRole2Status().setDesired(r2);
+    List<RoleInstance> instances = createAndStartNodes();
+
+    int clusterSize = r0 + r1 + r2;
+    assertEquals(instances.size(), clusterSize);
+
+    //clone the list
+    List<Container> containers = new ArrayList<>();
+    for (RoleInstance ri : instances) {
+      containers.add(ri.container);
+    }
+    NodeMap nodemap = appState.getRoleHistory().cloneNodemap();
+
+    //and rebuild
+
+    AppStateBindingInfo bindingInfo = buildBindingInfo();
+    bindingInfo.application = factory.newApplication(r0, r1, r2)
+        .name(getTestName());
+    bindingInfo.liveContainers = containers;
+    appState = new MockAppState(bindingInfo);
+
+    assertEquals(appState.getLiveContainers().size(), clusterSize);
+
+    appState.getRoleHistory().dump();
+
+    //check that the app state direct structures match
+    List<RoleInstance> r0live = appState.enumLiveNodesInRole(ROLE0);
+    List<RoleInstance> r1live = appState.enumLiveNodesInRole(ROLE1);
+    List<RoleInstance> r2live = appState.enumLiveNodesInRole(ROLE2);
+
+    assertEquals(r0, r0live.size());
+    assertEquals(r1, r1live.size());
+    assertEquals(r2, r2live.size());
+
+    //now examine the role history
+    NodeMap newNodemap = appState.getRoleHistory().cloneNodemap();
+
+    for (NodeInstance nodeInstance : newNodemap.values()) {
+      String hostname = nodeInstance.hostname;
+      NodeInstance orig = nodemap.get(hostname);
+      assertNotNull("Null entry in original nodemap for " + hostname, orig);
+
+      for (int i : Arrays.asList(getRole0Status().getKey(), getRole1Status()
+          .getKey(), getRole2Status().getKey())) {
+        assertEquals(nodeInstance.getActiveRoleInstances(i), orig
+            .getActiveRoleInstances(i));
+        NodeEntry origRE = orig.getOrCreate(i);
+        NodeEntry newRE = nodeInstance.getOrCreate(i);
+        assertEquals(origRE.getLive(), newRE.getLive());
+        assertEquals(0, newRE.getStarting());
+      }
+    }
+    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+    Application application = appState.getClusterStatus();
+    // verify the AM restart container count was set
+    Long restarted = application.getNumberOfRunningContainers();
+    assertNotNull(restarted);
+    //and that the count == 1 master + the region servers
+    assertEquals(restarted.longValue(), (long)containers.size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java
new file mode 100644
index 0000000..946f1c1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
+
+/**
+ * Test that the app state lets you ask for nodes, get a specific host,
+ * release it and then get that one back again.
+ */
+public class TestMockAppStateRolePlacement extends BaseMockAppStateTest
+    implements MockRoles {
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateRolePlacement";
+  }
+
+  /**
+   * Request a container, verify the cancel/assignment bookkeeping,
+   * release the container, then re-request and verify the new request
+   * is pinned to the host the container previously ran on.
+   */
+  @Test
+  public void testAllocateReleaseRealloc() throws Throwable {
+    getRole0Status().setDesired(1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    ContainerRequestOperation operation = (ContainerRequestOperation)ops
+        .get(0);
+    AMRMClient.ContainerRequest request = operation.getRequest();
+    // an unplaced role request relaxes locality and names no nodes or racks
+    assertTrue(request.getRelaxLocality());
+    assertNull(request.getNodes());
+    assertNull(request.getRacks());
+    assertNotNull(request.getCapability());
+
+    Container allocated = engine.allocateContainer(request);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    List<AbstractRMOperation> releaseOperations = new ArrayList<>();
+    appState.onContainersAllocated(Arrays.asList(allocated),
+        assignments, releaseOperations);
+    // verify the cancel operation matches the allocation
+    assertEquals(1, releaseOperations.size());
+    CancelSingleRequest cancelOp = (CancelSingleRequest)releaseOperations
+        .get(0);
+    assertNotNull(cancelOp.getRequest());
+    assertNotNull(cancelOp.getRequest().getCapability());
+    assertEquals(allocated.getResource(),
+        cancelOp.getRequest().getCapability());
+    // now the assignment
+    assertEquals(1, assignments.size());
+    ContainerAssignment assigned = assignments.get(0);
+    Container container = assigned.container;
+    assertEquals(allocated.getId(), container.getId());
+    int roleId = assigned.role.getPriority();
+    assertEquals(extractRole(request.getPriority()), roleId);
+    assertEquals(ROLE0, assigned.role.getName());
+    String containerHostname = RoleHistoryUtils.hostnameOf(container);
+    RoleInstance ri = roleInstance(assigned);
+    // tell the app state the container arrived
+    appState.containerStartSubmitted(container, ri);
+    assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
+    assertEquals(1, getRole0Status().getRunning());
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(0, ops.size());
+
+    // flex down: the container is now surplus
+    getRole0Status().setDesired(0);
+    ops = appState.reviewRequestAndReleaseNodes();
+    ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
+
+    assertEquals(container.getId(), release.getContainerId());
+    engine.execute(ops);
+    assertNotNull(appState.onCompletedContainer(containerStatus(container
+        .getId())).roleInstance);
+
+    // view the world
+    appState.getRoleHistory().dump();
+
+    // now ask for a new one
+    getRole0Status().setDesired(1);
+    ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, ops.size());
+    operation = (ContainerRequestOperation) ops.get(0);
+    AMRMClient.ContainerRequest request2 = operation.getRequest();
+    assertNotNull(request2);
+    // placement history pins the new request to the previous host
+    assertEquals(containerHostname, request2.getNodes().get(0));
+    assertFalse(request2.getRelaxLocality());
+    engine.execute(ops);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java
new file mode 100644
index 0000000..d62a91e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test that if you have >1 role, the right roles are chosen for release.
+ */
+public class TestMockAppStateRoleRelease extends BaseMockAppStateTest
+    implements MockRoles {
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateRoleRelease";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a 4 node, 4 containers-per-node mock YARN engine
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(4, 4);
+  }
+
+  @Test
+  public void testAllocateReleaseRealloc() throws Throwable {
+    // allocate across all nodes: 6 + 5 + 4 == 15 instances
+    getRole0Status().setDesired(6);
+    getRole1Status().setDesired(5);
+    getRole2Status().setDesired(4);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(15, instances.size());
+
+    // flex role 0 down to zero; all of its containers are now surplus
+    getRole0Status().setDesired(0);
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+
+    List<ContainerId> released = new ArrayList<>();
+    engine.execute(ops, released);
+    List<ContainerId> ids = extractContainerIds(instances, ROLE0);
+    for (ContainerId cid : released) {
+      // every released container completes and must belong to role 0
+      assertNotNull(appState.onCompletedContainer(containerStatus(cid))
+          .roleInstance);
+      assertTrue(ids.contains(cid));
+    }
+
+    // view the world
+    appState.getRoleHistory().dump();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
new file mode 100644
index 0000000..eaf5271
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Resource;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.Collections;
+
+/**
+ * Test dynamic flexing of a component with unique component support
+ * enabled: flexing "group1" up or down creates/retires per-instance
+ * roles looked up as "group11", "group12", ... in the same group.
+ */
+public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
+    implements MockRoles {
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateUniqueNames";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a 4 node, 4 containers-per-node mock YARN engine
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(4, 4);
+  }
+
+  @Override
+  public AppStateBindingInfo buildBindingInfo() {
+    AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+    // release the most recently acquired containers first
+    bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
+    return bindingInfo;
+  }
+
+  /**
+   * Adds a "group1" component (2 containers, 1024MB/2 vcores) with
+   * unique component support enabled to the base application.
+   */
+  @Override
+  public Application buildApplication() {
+    Application application = super.buildApplication();
+
+    Component component = new Component().name("group1").numberOfContainers(2L)
+        .resource(new Resource().memory("1024").cpus(2))
+        .uniqueComponentSupport(true);
+
+    application.getComponents().add(component);
+    return application;
+  }
+
+  @Test
+  public void testDynamicFlexDown() throws Throwable {
+    createAndStartNodes();
+    appState.updateComponents(Collections.singletonMap("group1", 0L));
+    createAndStartNodes();
+    // the per-instance role "group11" survives the flex-down with
+    // desired == 0 and keeps the group's resource profile
+    RoleStatus roleStatus = appState.lookupRoleStatus("group11");
+    assertEquals(0, roleStatus.getDesired());
+    assertEquals(1024L, roleStatus.getResourceRequirements().getMemorySize());
+    assertEquals(2, roleStatus.getResourceRequirements().getVirtualCores());
+    assertEquals("group1", roleStatus.getGroup());
+  }
+
+  @Test
+  public void testDynamicFlexUp() throws Throwable {
+    createAndStartNodes();
+    appState.updateComponents(Collections.singletonMap("group1", 3L));
+    createAndStartNodes();
+    // flexing to 3 yields three uniquely named roles in group "group1",
+    // each desiring one instance with the group's resource profile
+    RoleStatus group11 = appState.lookupRoleStatus("group11");
+    RoleStatus group12 = appState.lookupRoleStatus("group12");
+    RoleStatus group13 = appState.lookupRoleStatus("group13");
+    assertEquals(1, group11.getDesired());
+    assertEquals(1, group12.getDesired());
+    assertEquals(1, group13.getDesired());
+    assertEquals(1024L, group11.getResourceRequirements().getMemorySize());
+    assertEquals(1024L, group12.getResourceRequirements().getMemorySize());
+    assertEquals(1024L, group13.getResourceRequirements().getMemorySize());
+    assertEquals(2, group11.getResourceRequirements().getVirtualCores());
+    assertEquals(2, group12.getResourceRequirements().getVirtualCores());
+    assertEquals(2, group13.getResourceRequirements().getVirtualCores());
+    assertEquals("group1", group11.getGroup());
+    assertEquals("group1", group12.getGroup());
+    assertEquals("group1", group13.getGroup());
+
+    appState.refreshClusterStatus();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
new file mode 100644
index 0000000..046bd83
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Test the container resource allocation logic.
+ */
+public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {
+
+  @Override
+  public Application buildApplication() {
+    return factory.newApplication(1, 0, 0).name(getTestName());
+  }
+
+  /**
+   * An explicit memory/vcore resource spec is passed through to the
+   * container request capability.
+   */
+  @Test
+  public void testNormalAllocations() throws Throwable {
+    Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
+    role0.resource(new org.apache.slider.api.resource.Resource().memory("512")
+        .cpus(2));
+    appState.updateComponents(Collections.singletonMap(role0.getName(),
+        role0.getNumberOfContainers()));
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, ops.size());
+    ContainerRequestOperation operation = (ContainerRequestOperation) ops
+        .get(0);
+    Resource requirements = operation.getRequest().getCapability();
+    assertEquals(512L, requirements.getMemorySize());
+    assertEquals(2, requirements.getVirtualCores());
+  }
+
+  /**
+   * A memory value of {@link ResourceKeys#YARN_RESOURCE_MAX} resolves to
+   * the RM's maximum container memory.
+   */
+  @Test
+  public void testMaxMemAllocations() throws Throwable {
+    // max core allocations no longer supported
+    Component role0 = appState.getClusterStatus().getComponent(MockRoles.ROLE0);
+    role0.resource(new org.apache.slider.api.resource.Resource()
+        .memory(ResourceKeys.YARN_RESOURCE_MAX).cpus(2));
+    appState.updateComponents(Collections.singletonMap(role0.getName(),
+        role0.getNumberOfContainers()));
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, ops.size());
+    ContainerRequestOperation operation = (ContainerRequestOperation) ops
+        .get(0);
+    Resource requirements = operation.getRequest().getCapability();
+    assertEquals(MockAppState.RM_MAX_RAM, requirements.getMemorySize());
+    assertEquals(2, requirements.getVirtualCores());
+  }
+
+  /** With no explicit resource spec, the YARN defaults are requested. */
+  @Test
+  public void testMaxDefaultAllocations() throws Throwable {
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    // expected value first, consistent with the other tests in this class
+    assertEquals(1, ops.size());
+    ContainerRequestOperation operation = (ContainerRequestOperation) ops
+        .get(0);
+    Resource requirements = operation.getRequest().getCapability();
+    assertEquals(ResourceKeys.DEF_YARN_MEMORY, requirements.getMemorySize());
+    assertEquals(ResourceKeys.DEF_YARN_CORES, requirements.getVirtualCores());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java
new file mode 100644
index 0000000..a3f8abd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test anti-affine placement of a labelled (GPU) role.
+ */
+public class TestMockLabelledAAPlacement extends BaseMockAppStateAATest
+    implements MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMockLabelledAAPlacement.class);
+
+  private static final int NODES = 3;
+  private static final int GPU_NODES = 2;
+  private static final String HOST0 = "00000000";
+  private static final String HOST1 = "00000001";
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+
+    // only the first two hosts carry the GPU label
+    updateNodes(MockFactory.INSTANCE.newNodeReport(HOST0, NodeState.RUNNING,
+        LABEL_GPU));
+    updateNodes(MockFactory.INSTANCE.newNodeReport(HOST1, NodeState.RUNNING,
+        LABEL_GPU));
+  }
+
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(NODES, 8);
+  }
+
+  /** Assert all containers of the GPU role are anti-affine. */
+  void assertAllContainersAA() {
+    assertAllContainersAA(getGpuRole().getKey());
+  }
+
+  /**
+   * Ask for one more GPU instance than there are GPU-labelled nodes;
+   * the final request must remain unsatisfied until the cluster
+   * changes size.
+   * @throws Throwable on test failure
+   */
+  @Test
+  public void testAskForTooMany() throws Throwable {
+    RoleStatus gpuRole = getGpuRole();
+
+    describe("Ask for 1 more than the no of available nodes;" +
+        " expect the final request to be unsatisfied until the cluster " +
+        "changes size");
+    // one more than the number of labelled nodes
+    int size = GPU_NODES;
+    gpuRole.setDesired(size + 1);
+
+    List<AbstractRMOperation> operations = appState
+        .reviewRequestAndReleaseNodes();
+    assertTrue(gpuRole.isAARequestOutstanding());
+
+    assertEquals(size, gpuRole.getAAPending());
+    for (int i = 0; i < size; i++) {
+      String iter = "Iteration " + i + " role = " + getAaRole();
+      describe(iter);
+      List<AbstractRMOperation> operationsOut = new ArrayList<>();
+
+      List<RoleInstance> roleInstances = submitOperations(operations,
+          EMPTY_ID_LIST, operationsOut);
+      // one instance per request
+      assertEquals(1, roleInstances.size());
+      appState.onNodeManagerContainerStarted(roleInstances.get(0)
+          .getContainerId());
+      assertAllContainersAA();
+      LOG.debug(nodeInformationSnapshotAsString());
+      operations = operationsOut;
+      if (i + 1 < size) {
+        assertEquals(2, operations.size());
+      } else {
+        assertEquals(1, operations.size());
+      }
+    }
+    // the role ends under its desired count, with nothing requested
+    // and no AA request outstanding
+    assertTrue(gpuRole.getRunning() < gpuRole.getDesired());
+    assertEquals(0, gpuRole.getRequested());
+    assertFalse(gpuRole.isAARequestOutstanding());
+    List<Container> allocatedContainers = engine.execute(operations,
+        EMPTY_ID_LIST);
+    assertEquals(0, allocatedContainers.size());
+    // in a review now, no more requests can be generated, as there is no
+    // space for AA placements, even though there is cluster capacity
+    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+    // switch node 2 into being labelled
+    NodeUpdatedOutcome outcome = updateNodes(MockFactory.INSTANCE.
+        newNodeReport("00000002", NodeState.RUNNING, LABEL_GPU));
+
+    assertEquals(NODES, cloneNodemap().size());
+    assertTrue(outcome.clusterChanged);
+    // the node update itself generates no operations
+    assertTrue(outcome.operations.isEmpty());
+    // but a subsequent review generates one more request
+    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+  }
+
+  /** Bring a new GPU-labelled node into the cluster. */
+  protected AppState.NodeUpdatedOutcome addNewNode() {
+    return updateNodes(MockFactory.INSTANCE.newNodeReport("00000004",
+        NodeState.RUNNING, LABEL_GPU));
+  }
+
+  @Test
+  public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
+    RoleStatus gpuRole = getGpuRole();
+    describe("Change the cluster size where the cluster size changes during " +
+        "a test sequence.");
+    gpuRole.setDesired(GPU_NODES + 1);
+    List<AbstractRMOperation> operations = appState
+        .reviewRequestAndReleaseNodes();
+    assertTrue(gpuRole.isAARequestOutstanding());
+    assertEquals(GPU_NODES, gpuRole.getAAPending());
+    NodeUpdatedOutcome outcome = addNewNode();
+    assertTrue(outcome.clusterChanged);
+    // one call to cancel
+    assertEquals(1, outcome.operations.size());
+    // and on a review, one more to rebuild
+    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java
new file mode 100644
index 0000000..5ae626e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+/**
+ * Test outstanding request validation.
+ */
+public class TestOutstandingRequestValidation extends SliderTestBase {
+
+  private static final String[] H1 = hosts("one");
+
+  @Test
+  public void testRelaxedNohostsOrLabels() throws Throwable {
+    createAndValidate(null, null, true);
+  }
+
+  @Test
+  public void testRelaxedLabels() throws Throwable {
+    createAndValidate(null, "gpu", true);
+  }
+
+  @Test
+  public void testNonRelaxedLabels() throws Throwable {
+    expectCreationFailure(null, "gpu", false);
+  }
+
+  @Test
+  public void testRelaxedHostNoLabel() throws Throwable {
+    createAndValidate(H1, "", true);
+  }
+
+  /**
+   * Use varargs for simple list to array conversion.
+   * @param hostnames host names
+   * @return the arguments as a string array
+   */
+  public static String[] hosts(String...hostnames) {
+    return hostnames;
+  }
+
+  /**
+   * Create a request and expect validation to fail with an
+   * {@link IllegalArgumentException} about locality relaxation.
+   * @param hosts optional list of hostnames
+   * @param labels optional label expression
+   * @param relaxLocality locality relaxation flag
+   */
+  void expectCreationFailure(
+      String[] hosts,
+      String labels,
+      boolean relaxLocality) {
+    try {
+      ContainerRequest result = createAndValidate(hosts, labels, relaxLocality);
+      fail("Expected an exception, got " + result);
+    } catch (IllegalArgumentException expected) {
+      assertTrue(expected.toString()
+          .contains("Can't turn off locality relaxation on a request with no " +
+              "location constraints"));
+    }
+  }
+
+  /**
+   * Create a container request and run it through
+   * {@code OutstandingRequest.validateContainerRequest}.
+   * @param hosts optional list of hostnames
+   * @param labels optional label expression
+   * @param relaxLocality locality relaxation flag
+   * @return the validated request
+   */
+  AMRMClient.ContainerRequest createAndValidate(
+      String[] hosts,
+      String labels,
+      boolean relaxLocality) {
+    int p = 1;
+    Priority pri = ContainerPriority.createPriority(p, !relaxLocality);
+    ContainerRequest issuedRequest =
+        newRequest(pri, hosts, labels, relaxLocality);
+    OutstandingRequest.validateContainerRequest(issuedRequest, p, "");
+    return issuedRequest;
+  }
+
+  /**
+   * Build a container request with a small fixed resource footprint.
+   * @param pri request priority
+   * @param hosts optional list of hostnames
+   * @param labels optional label expression
+   * @param relaxLocality locality relaxation flag
+   * @return the new request
+   */
+  AMRMClient.ContainerRequest newRequest(
+      Priority pri,
+      String[] hosts,
+      String labels,
+      boolean relaxLocality) {
+    int cores = 1;
+    int memory = 64;
+    Resource resource = Resource.newInstance(memory, cores);
+    return new AMRMClient.ContainerRequest(resource,
+        hosts,
+        null,
+        pri,
+        relaxLocality,
+        labels);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java
new file mode 100644
index 0000000..077a6d5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.api.types.NodeInformationList;
+import org.apache.slider.api.types.RestTypeMarshalling;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test anti-affine placement.
+ */
+public class TestRoleHistoryAA extends SliderTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryAA.class);
+
+  private List<String> hostnames = Arrays.asList("1", "2", "3");
+  private NodeMap nodeMap, gpuNodeMap;
+  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+
+  public TestRoleHistoryAA() throws BadConfigException {
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    nodeMap = createNodeMap(hostnames, NodeState.RUNNING, "");
+    gpuNodeMap = createNodeMap(hostnames, NodeState.RUNNING, "GPU");
+  }
+
+  @Test
+  public void testFindNodesInFullCluster() throws Throwable {
+    // all three will surface at first
+    verifyResultSize(3, nodeMap.findAllNodesForRole(1, ""));
+  }
+
+  @Test
+  public void testFindNodesInUnhealthyCluster() throws Throwable {
+    // all three will surface at first
+    markNodeOneUnhealthy();
+    verifyResultSize(2, nodeMap.findAllNodesForRole(1, ""));
+  }
+
+  public boolean markNodeOneUnhealthy() {
+    return setNodeState(nodeMap.get("1"), NodeState.UNHEALTHY);
+  }
+
+  protected boolean setNodeState(NodeInstance node, NodeState state) {
+    return node.updateNode(MockFactory.INSTANCE.newNodeReport(node.hostname,
+        state, ""));
+  }
+
+  @Test
+  public void testFindNoNodesWrongLabel() throws Throwable {
+    // no node carries the GPU label, so none should match
+    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
+  }
+
+  @Test
+  public void testFindSomeNodesSomeLabel() throws Throwable {
+    // relabel node "1" as GPU; only it should match a GPU query
+    update(nodeMap,
+        Arrays.asList(MockFactory.INSTANCE.newNodeReport("1", NodeState
+            .RUNNING, "GPU")));
+    List<NodeInstance> gpuNodes = nodeMap.findAllNodesForRole(1, "GPU");
+    verifyResultSize(1, gpuNodes);
+    NodeInstance instance = gpuNodes.get(0);
+    instance.getOrCreate(1).onStarting();
+    assertFalse(instance.canHost(1, "GPU"));
+    assertFalse(instance.canHost(1, ""));
+    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
+
+  }
+
+  @Test
+  public void testFindNoNodesRightLabel() throws Throwable {
+    // all three will surface at first
+    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, "GPU"));
+  }
+
+  @Test
+  public void testFindNoNodesNoLabel() throws Throwable {
+    // all three will surface at first
+    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, ""));
+  }
+
+  @Test
+  public void testFindNoNodesClusterRequested() throws Throwable {
+    // once every node has an outstanding request, none are available
+    for (NodeInstance ni : nodeMap.values()) {
+      ni.getOrCreate(1).request();
+    }
+    assertNoAvailableNodes(1);
+  }
+
+  @Test
+  public void testFindNoNodesClusterBusy() throws Throwable {
+    // once every node has an outstanding request, none are available
+    for (NodeInstance ni : nodeMap.values()) {
+      ni.getOrCreate(1).request();
+    }
+    assertNoAvailableNodes(1);
+  }
+
+  /**
+   * Tag all nodes as starting, then walk one through a bit
+   * more of its lifecycle.
+   */
+  @Test
+  public void testFindNoNodesLifecycle() throws Throwable {
+    // mark every node as starting, so none are available
+    for (NodeInstance ni : nodeMap.values()) {
+      ni.getOrCreate(1).onStarting();
+    }
+    assertNoAvailableNodes(1);
+
+    // walk one of the nodes through the lifecycle
+    NodeInstance node1 = nodeMap.get("1");
+    assertFalse(node1.canHost(1, ""));
+    node1.get(1).onStartCompleted();
+    assertFalse(node1.canHost(1, ""));
+    assertNoAvailableNodes(1);
+    node1.get(1).release();
+    assertTrue(node1.canHost(1, ""));
+    List<NodeInstance> list2 =
+        verifyResultSize(1, nodeMap.findAllNodesForRole(1, ""));
+    assertEquals(list2.get(0).hostname, "1");
+
+    // now tag that node as unhealthy and expect it to go away
+    markNodeOneUnhealthy();
+    assertNoAvailableNodes(1);
+  }
+
+  @Test
+  public void testRolesIndependent() throws Throwable {
+    NodeInstance node1 = nodeMap.get("1");
+    NodeEntry role1 = node1.getOrCreate(1);
+    NodeEntry role2 = node1.getOrCreate(2);
+    for (NodeInstance ni : nodeMap.values()) {
+      ni.updateNode(MockFactory.INSTANCE.newNodeReport("0", NodeState
+          .UNHEALTHY, ""));
+    }
+    assertNoAvailableNodes(1);
+    assertNoAvailableNodes(2);
+    assertTrue(setNodeState(node1, NodeState.RUNNING));
+    // tag role 1 as busy
+    role1.onStarting();
+    assertNoAvailableNodes(1);
+
+    verifyResultSize(1, nodeMap.findAllNodesForRole(2, ""));
+    assertTrue(node1.canHost(2, ""));
+  }
+
+  @Test
+  public void testNodeEntryAvailablity() throws Throwable {
+    NodeEntry entry = new NodeEntry(1);
+    assertTrue(entry.isAvailable());
+    entry.onStarting();
+    assertFalse(entry.isAvailable());
+    entry.onStartCompleted();
+    assertFalse(entry.isAvailable());
+    entry.release();
+    assertTrue(entry.isAvailable());
+    entry.onStarting();
+    assertFalse(entry.isAvailable());
+    entry.onStartFailed();
+    assertTrue(entry.isAvailable());
+  }
+
+  @Test
+  public void testNodeInstanceSerialization() throws Throwable {
+    MockRoleHistory rh2 = new MockRoleHistory(new ArrayList<>());
+    rh2.getOrCreateNodeInstance("localhost");
+    NodeInstance instance = rh2.getOrCreateNodeInstance("localhost");
+    instance.getOrCreate(1).onStartCompleted();
+    Map<Integer, String> naming = Collections.singletonMap(1, "manager");
+    NodeInformation ni = instance.serialize(naming);
+    assertEquals(1, ni.entries.get("manager").live);
+    NodeInformation ni2 = rh2.getNodeInformation("localhost", naming);
+    assertEquals(1, ni2.entries.get("manager").live);
+    Map<String, NodeInformation> info = rh2.getNodeInformationSnapshot(naming);
+    assertEquals(1, info.get("localhost").entries.get("manager").live);
+    NodeInformationList nil = new NodeInformationList(info.values());
+    assertEquals(1, nil.get(0).entries.get("manager").live);
+
+    Messages.NodeInformationProto nodeInformationProto =
+        RestTypeMarshalling.marshall(ni);
+    Messages.NodeEntryInformationProto entryProto = nodeInformationProto
+        .getEntries(0);
+    assertNotNull(entryProto);
+    assertEquals(1, entryProto.getPriority());
+    NodeInformation unmarshalled =
+        RestTypeMarshalling.unmarshall(nodeInformationProto);
+    assertEquals(unmarshalled.hostname, ni.hostname);
+    assertTrue(unmarshalled.entries.keySet().containsAll(ni.entries.keySet()));
+
+  }
+
+  @Test
+  public void testBuildRolenames() throws Throwable {
+
+  }
+  public List<NodeInstance> assertNoAvailableNodes(int role) {
+    String label = "";
+    return verifyResultSize(0, nodeMap.findAllNodesForRole(role, label));
+  }
+
+  List<NodeInstance> verifyResultSize(int size, List<NodeInstance> list) {
+    if (list.size() != size) {
+      for (NodeInstance ni : list) {
+        LOG.error(ni.toFullString());
+      }
+    }
+    assertEquals(size, list.size());
+    return list;
+  }
+
+  NodeMap createNodeMap(List<NodeReport> nodeReports)
+      throws BadConfigException {
+    NodeMap newNodeMap = new NodeMap(1);
+    update(newNodeMap, nodeReports);
+    return newNodeMap;
+  }
+
+  protected boolean update(NodeMap nm, List<NodeReport> nodeReports) {
+    return nm.buildOrUpdate(nodeReports);
+  }
+
+  NodeMap createNodeMap(List<String> hosts, NodeState state,
+      String label) throws BadConfigException {
+    return createNodeMap(MockFactory.INSTANCE.createNodeReports(hosts, state,
+        label));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java
new file mode 100644
index 0000000..36a480e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test container events at the role history level - one below
+ * the App State.
+ */
+public class TestRoleHistoryContainerEvents extends BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryContainerEvents.class);
+
+  @Override
+  public String getTestName() {
+    return "TestRoleHistoryContainerEvents";
+  }
+
+  private NodeInstance age1Active4;
+  private NodeInstance age2Active2;
+  private NodeInstance age3Active0;
+  private NodeInstance age4Active1;
+  private NodeInstance age2Active0;
+
+  private RoleHistory roleHistory;
+
+  private Resource resource;
+
+  AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) {
+    return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+
+    age1Active4 = nodeInstance(1, 4, 0, 0);
+    age2Active2 = nodeInstance(2, 2, 0, 1);
+    age3Active0 = nodeInstance(3, 0, 0, 0);
+    age4Active1 = nodeInstance(4, 1, 0, 0);
+    age2Active0 = nodeInstance(2, 0, 0, 0);
+
+    roleHistory = appState.getRoleHistory();
+    roleHistory.insert(Arrays.asList(age2Active2, age2Active0,
+        age4Active1, age1Active4, age3Active0));
+    roleHistory.buildRecentNodeLists();
+    resource = Resource.newInstance(ResourceKeys.DEF_YARN_CORES,
+                                    ResourceKeys.DEF_YARN_MEMORY);
+  }
+
+  @Test
+  public void testFindAndCreate() throws Throwable {
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    List<String> requestNodes = request.getNodes();
+    assertNotNull(requestNodes);
+    assertEquals(1, requestNodes.size());
+    String hostname = requestNodes.get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    //build a container
+    MockContainer container = factory.newContainer();
+    container.setNodeId(new MockNodeId(hostname, 0));
+    container.setPriority(request.getPriority());
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    //start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    //later, declare that it started
+    roleHistory.onContainerStarted(container);
+    assertEquals(0, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    assertEquals(1, roleEntry.getActive());
+    assertEquals(1, roleEntry.getLive());
+  }
+
+  @Test
+  public void testCreateAndRelease() throws Throwable {
+    RoleStatus roleStatus = getRole1Status();
+
+    //verify it is empty
+    assertTrue(roleHistory.listActiveNodes(roleStatus.getKey()).isEmpty());
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    assertNull(request.getNodes());
+
+    //pick an idle host
+    String hostname = age3Active0.hostname;
+
+    //build a container
+    MockContainer container = factory.newContainer(new MockNodeId(hostname,
+        0), request.getPriority());
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    //start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    //later, declare that it started
+    roleHistory.onContainerStarted(container);
+    assertEquals(0, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    assertEquals(1, roleEntry.getActive());
+    assertEquals(1, roleEntry.getLive());
+
+    // now pick that instance to destroy
+    List<NodeInstance> activeNodes = roleHistory.listActiveNodes(roleStatus
+        .getKey());
+
+
+    assertEquals(1, activeNodes.size());
+    NodeInstance target = activeNodes.get(0);
+    assertEquals(target, allocated);
+    roleHistory.onContainerReleaseSubmitted(container);
+    assertEquals(1, roleEntry.getReleasing());
+    assertEquals(1, roleEntry.getLive());
+    assertEquals(0, roleEntry.getActive());
+
+    // release completed
+    roleHistory.onReleaseCompleted(container);
+    assertEquals(0, roleEntry.getReleasing());
+    assertEquals(0, roleEntry.getLive());
+    assertEquals(0, roleEntry.getActive());
+
+    // verify it is empty
+    assertTrue(roleHistory.listActiveNodes(roleStatus.getKey()).isEmpty());
+
+    // ask for a container and expect to get the recently released one
+    AMRMClient.ContainerRequest request2 =
+        requestContainer(roleStatus);
+
+    List<String> nodes2 = request2.getNodes();
+    assertNotNull(nodes2);
+    String hostname2 = nodes2.get(0);
+
+    //pick an idle host
+    assertEquals(hostname2, age3Active0.hostname);
+  }
+
+
+  @Test
+  public void testStartWithoutWarning() throws Throwable {
+    //pick an idle host
+    String hostname = age3Active0.hostname;
+    //build a container
+    MockContainer container = factory.newContainer(
+        new MockNodeId(hostname, 0),
+        ContainerPriority.createPriority(getRole0Status().getKey(), false));
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(getRole0Status().getKey());
+
+    //tell RH that it started
+    roleHistory.onContainerStarted(container);
+    assertEquals(0, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    assertEquals(1, roleEntry.getActive());
+    assertEquals(1, roleEntry.getLive());
+  }
+
+  @Test
+  public void testStartFailed() throws Throwable {
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    LOG.info("req {}", request);
+    LOG.info("{}", request.getNodes());
+    String hostname = request.getNodes().get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    //build a container
+    MockContainer container = factory.newContainer(new MockNodeId(hostname,
+        0), request.getPriority());
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    //start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    //later, declare that it failed on startup
+    assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
+    assertEquals(0, roleEntry.getStarting());
+    assertEquals(1, roleEntry.getStartFailed());
+    assertEquals(1, roleEntry.getFailed());
+    assertTrue(roleEntry.isAvailable());
+    assertEquals(0, roleEntry.getActive());
+    assertEquals(0, roleEntry.getLive());
+  }
+
+  @Test
+  public void testStartFailedWithoutWarning() throws Throwable {
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    //build a container
+    MockContainer container = factory.newContainer();
+    container.setNodeId(new MockNodeId(hostname, 0));
+    container.setPriority(request.getPriority());
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+
+    assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
+    assertEquals(0, roleEntry.getStarting());
+    assertEquals(1, roleEntry.getStartFailed());
+    assertEquals(1, roleEntry.getFailed());
+    assertTrue(roleEntry.isAvailable());
+    assertEquals(0, roleEntry.getActive());
+    assertEquals(0, roleEntry.getLive());
+  }
+
+  @Test
+  public void testContainerFailed() throws Throwable {
+    describe("fail a container without declaring it as starting");
+
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    //build a container
+    MockContainer container = factory.newContainer();
+    container.setNodeId(new MockNodeId(hostname, 0));
+    container.setPriority(request.getPriority());
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    //start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    roleHistory.onContainerStarted(container);
+
+    //later, declare that it failed
+    roleHistory.onFailedContainer(
+        container,
+        false,
+        ContainerOutcome.Failed);
+    assertEquals(0, roleEntry.getStarting());
+    assertTrue(roleEntry.isAvailable());
+    assertEquals(0, roleEntry.getActive());
+    assertEquals(0, roleEntry.getLive());
+  }
+
+  @Test
+  public void testContainerFailedWithoutWarning() throws Throwable {
+    describe("fail a container without declaring it as starting");
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    //build a container
+    MockContainer container = factory.newContainer();
+    container.setNodeId(new MockNodeId(hostname, 0));
+    container.setPriority(request.getPriority());
+
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertTrue(roleEntry.isAvailable());
+    roleHistory.onFailedContainer(
+        container,
+        false,
+        ContainerOutcome.Failed);
+    assertEquals(0, roleEntry.getStarting());
+    assertEquals(1, roleEntry.getFailed());
+    assertTrue(roleEntry.isAvailable());
+    assertEquals(0, roleEntry.getActive());
+    assertEquals(0, roleEntry.getLive());
+  }
+
+  @Test
+  public void testAllocationListPrep() throws Throwable {
+    describe("test prepareAllocationList");
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        requestContainer(roleStatus);
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(hostname, age3Active0.hostname);
+
+    MockContainer container1 = factory.newContainer();
+    container1.setNodeId(new MockNodeId(hostname, 0));
+    container1.setPriority(Priority.newInstance(getRole0Status().getKey()));
+
+    MockContainer container2 = factory.newContainer();
+    container2.setNodeId(new MockNodeId(hostname, 0));
+    container2.setPriority(Priority.newInstance(getRole1Status().getKey()));
+
+    // put containers in List with role == 1 first
+    List<Container> containers = Arrays.asList((Container) container2,
+        (Container) container1);
+    List<Container> sortedContainers = roleHistory.prepareAllocationList(
+        containers);
+
+    // verify that the first container has role == 0 after sorting
+    MockContainer c1 = (MockContainer) sortedContainers.get(0);
+    assertEquals(getRole0Status().getKey(), c1.getPriority().getPriority());
+    MockContainer c2 = (MockContainer) sortedContainers.get(1);
+    assertEquals(getRole1Status().getKey(), c2.getPriority().getPriority());
+  }
+
+  @Test
+  public void testNodeUpdated() throws Throwable {
+    describe("fail a node");
+
+    RoleStatus roleStatus = getRole0Status();
+
+    AMRMClient.ContainerRequest request =
+        roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(age3Active0.hostname, hostname);
+
+    // build a container
+    MockContainer container = factory.newContainer(new MockNodeId(hostname,
+        0), request.getPriority());
+
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(roleStatus.getKey());
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    // start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    roleHistory.onContainerStarted(container);
+
+    int startSize = nodemap.size();
+
+    // now send a list of updated (failed) nodes event
+    List<NodeReport> nodesUpdated = new ArrayList<>();
+    NodeReport nodeReport = NodeReport.newInstance(
+        NodeId.newInstance(hostname, 0),
+        NodeState.LOST,
+        null, null, null, null, 1, null, 0);
+    nodesUpdated.add(nodeReport);
+    roleHistory.onNodesUpdated(nodesUpdated);
+
+    nodemap = roleHistory.cloneNodemap();
+    int endSize = nodemap.size();
+    // as even unused nodes are added to the list, we expect the map size to
+    // be >1
+    assertTrue(startSize <= endSize);
+    assertNotNull(nodemap.get(hostname));
+    assertFalse(nodemap.get(hostname).isOnline());
+
+    // add a failure of a node we've never heard of
+    String newhost = "newhost";
+    nodesUpdated = Arrays.asList(
+        NodeReport.newInstance(
+            NodeId.newInstance(newhost, 0),
+            NodeState.LOST,
+            null, null, null, null, 1, null, 0)
+    );
+    roleHistory.onNodesUpdated(nodesUpdated);
+
+    NodeMap nodemap2 = roleHistory.cloneNodemap();
+    assertNotNull(nodemap2.get(newhost));
+    assertFalse(nodemap2.get(newhost).isOnline());
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java
new file mode 100644
index 0000000..2d49e26
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Testing finding nodes for new instances.
+ *
+ * This stresses the non-AA codepath.
+ */
+public class TestRoleHistoryFindNodesForNewInstances extends
+    BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryFindNodesForNewInstances.class);
+
+  public TestRoleHistoryFindNodesForNewInstances() throws BadConfigException {
+  }
+
+  @Override
+  public String getTestName() {
+    return "TestFindNodesForNewInstances";
+  }
+
+  private NodeInstance age1Active4;
+  private NodeInstance age2Active2;
+  private NodeInstance age3Active0;
+  private NodeInstance age4Active1;
+  private NodeInstance age2Active0;
+
+  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+
+  private RoleStatus roleStat;
+  private RoleStatus roleStat2;
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+
+    age1Active4 = nodeInstance(1, 4, 0, 0);
+    age2Active2 = nodeInstance(2, 2, 0, 1);
+    age3Active0 = nodeInstance(3, 0, 0, 0);
+    age4Active1 = nodeInstance(4, 1, 0, 0);
+    age2Active0 = nodeInstance(2, 0, 0, 0);
+
+    roleHistory.insert(Arrays.asList(age2Active2, age2Active0, age4Active1,
+        age1Active4, age3Active0));
+    roleHistory.buildRecentNodeLists();
+
+    roleStat = getRole0Status();
+    roleStat2 = getRole2Status();
+  }
+
+  public List<NodeInstance> findNodes(int count) {
+    return findNodes(count, roleStat);
+  }
+
+  public List<NodeInstance> findNodes(int count, RoleStatus roleStatus) {
+    List <NodeInstance> found = new ArrayList<>();
+    for (int i = 0; i < count; i++) {
+      NodeInstance f = roleHistory.findRecentNodeForNewInstance(roleStatus);
+      if (f != null) {
+        found.add(f);
+      }
+    }
+    return found;
+  }
+
+  @Test
+  public void testFind1NodeR0() throws Throwable {
+    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+    LOG.info("found: {}", found);
+    assertTrue(Arrays.asList(age3Active0).contains(found));
+  }
+
+  @Test
+  public void testFind2NodeR0() throws Throwable {
+    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+    LOG.info("found: {}", found);
+    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found));
+    NodeInstance found2 = roleHistory.findRecentNodeForNewInstance(roleStat);
+    LOG.info("found: {}", found2);
+    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found2));
+    assertNotEquals(found, found2);
+  }
+
+  @Test
+  public void testFind3NodeR0ReturnsNull() throws Throwable {
+    assertEquals(2, findNodes(2).size());
+    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+    assertNull(found);
+  }
+
+  @Test
+  public void testFindNodesOneEntry() throws Throwable {
+    List<NodeInstance> foundNodes = findNodes(4, roleStat2);
+    assertEquals(0, foundNodes.size());
+  }
+
+  @Test
+  public void testFindNodesIndependent() throws Throwable {
+    assertEquals(2, findNodes(2).size());
+    roleHistory.dump();
+    assertEquals(0, findNodes(3, roleStat2).size());
+  }
+
+  @Test
+  public void testFindNodesFallsBackWhenUsed() throws Throwable {
+    // mark age2 and active 0 as busy, expect a null back
+    age2Active0.get(getRole0Status().getKey()).onStartCompleted();
+    assertNotEquals(0, age2Active0.getActiveRoleInstances(getRole0Status()
+        .getKey()));
+    age3Active0.get(getRole0Status().getKey()).onStartCompleted();
+    assertNotEquals(0, age3Active0.getActiveRoleInstances(getRole0Status()
+        .getKey()));
+    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+    if (found != null) {
+      LOG.info(found.toFullString());
+    }
+    assertNull(found);
+  }
+  @Test
+  public void testFindNodesSkipsFailingNode() throws Throwable {
+    // mark age2 and active 0 as busy, expect a null back
+
+    NodeEntry entry0 = age2Active0.get(getRole0Status().getKey());
+    entry0.containerCompleted(
+        false,
+        ContainerOutcome.Failed);
+    assertTrue(entry0.getFailed() > 0);
+    assertTrue(entry0.getFailedRecently() > 0);
+    entry0.containerCompleted(
+        false,
+        ContainerOutcome.Failed);
+    assertFalse(age2Active0.exceedsFailureThreshold(roleStat));
+    // set failure to 1
+    roleStat.getProviderRole().nodeFailureThreshold = 1;
+    // threshold is now exceeded
+    assertTrue(age2Active0.exceedsFailureThreshold(roleStat));
+
+    // get the role & expect age3 to be picked up, even though it is older
+    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+    assertEquals(age3Active0, found);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java
new file mode 100644
index 0000000..91abaa4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Unit test to verify the comparators sort as expected.
+ */
+public class TestRoleHistoryNIComparators extends BaseMockAppStateTest  {
+
+  private NodeInstance age1Active4;
+  private NodeInstance age2Active2;
+  private NodeInstance age3Active0;
+  private NodeInstance age4Active1;
+  private NodeInstance empty = new NodeInstance("empty", MockFactory
+      .ROLE_COUNT);
+  private NodeInstance age6failing;
+  private NodeInstance age1failing;
+
+  private List<NodeInstance> nodes;
+  private List<NodeInstance> nodesPlusEmpty;
+  private List<NodeInstance> allnodes;
+
+  private RoleStatus role0Status;
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+
+    role0Status = getRole0Status();
+
+    age1Active4 = nodeInstance(1001, 4, 0, 0);
+    age2Active2 = nodeInstance(1002, 2, 0, 0);
+    age3Active0 = nodeInstance(1003, 0, 0, 0);
+    age4Active1 = nodeInstance(1004, 1, 0, 0);
+    age6failing = nodeInstance(1006, 0, 0, 0);
+    age1failing = nodeInstance(1001, 0, 0, 0);
+
+    age6failing.get(role0Status.getKey()).setFailedRecently(2);
+    age1failing.get(role0Status.getKey()).setFailedRecently(1);
+
+    nodes = Arrays.asList(age2Active2, age4Active1, age1Active4, age3Active0);
+    nodesPlusEmpty = Arrays.asList(age2Active2, age4Active1, age1Active4,
+        age3Active0, empty);
+    allnodes = Arrays.asList(age6failing, age2Active2, age4Active1,
+        age1Active4, age3Active0, age1failing);
+  }
+
+  @Override
+  public String getTestName() {
+    return "TestNIComparators";
+  }
+
+  @Test
+  public void testPreferred() throws Throwable {
+    Collections.sort(nodes, new NodeInstance.Preferred(role0Status.getKey()));
+    assertListEquals(nodes, Arrays.asList(age4Active1, age3Active0,
+        age2Active2, age1Active4));
+  }
+
+  /**
+   * The preferred sort still includes failures; it is up to the next phase
+   * of the process to handle them.
+   * @throws Throwable
+   */
+  @Test
+  public void testPreferredWithFailures() throws Throwable {
+    Collections.sort(allnodes, new NodeInstance.Preferred(role0Status
+        .getKey()));
+    assertEquals(allnodes.get(0), age6failing);
+    assertEquals(allnodes.get(1), age4Active1);
+  }
+
+  @Test
+  public void testPreferredComparatorDowngradesFailures() throws Throwable {
+    NodeInstance.Preferred preferred = new NodeInstance.Preferred(role0Status
+        .getKey());
+    assertEquals(-1, preferred.compare(age6failing, age1failing));
+    assertEquals(1, preferred.compare(age1failing, age6failing));
+  }
+
+  @Test
+  public void testNewerThanNoRole() throws Throwable {
+    Collections.sort(nodesPlusEmpty, new NodeInstance.Preferred(role0Status
+        .getKey()));
+    assertListEquals(nodesPlusEmpty, Arrays.asList(age4Active1, age3Active0,
+        age2Active2, age1Active4, empty));
+  }
+
+  @Test
+  public void testMoreActiveThan() throws Throwable {
+
+    Collections.sort(nodes, new NodeInstance.MoreActiveThan(role0Status
+        .getKey()));
+    assertListEquals(nodes, Arrays.asList(age1Active4, age2Active2,
+        age4Active1, age3Active0));
+  }
+
+  @Test
+  public void testMoreActiveThanEmpty() throws Throwable {
+
+    Collections.sort(nodesPlusEmpty, new NodeInstance.MoreActiveThan(
+        role0Status.getKey()));
+    assertListEquals(nodesPlusEmpty, Arrays.asList(age1Active4, age2Active2,
+        age4Active1, age3Active0, empty));
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/50] [abbrv] hadoop git commit: YARN-6617. Services API delete call first attempt usually fails. Contributed by Jian He

Posted by ji...@apache.org.
YARN-6617. Services API delete call first attempt usually fails. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c0f96c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c0f96c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c0f96c6

Branch: refs/heads/yarn-native-services
Commit: 5c0f96c65183569f064c3e0f8de19467dd960518
Parents: e57dddc
Author: Billie Rinaldi <bi...@apache.org>
Authored: Fri May 26 12:15:28 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../org/apache/slider/client/SliderClient.java  | 36 ++++++++++++++++----
 1 file changed, 30 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c0f96c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 29ca471..a3ba8c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -230,6 +230,9 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private RegistryOperations registryOperations;
 
+  private static EnumSet<YarnApplicationState> terminatedStates = EnumSet
+      .of(YarnApplicationState.FINISHED, YarnApplicationState.FAILED,
+          YarnApplicationState.KILLED);
   /**
    * Constructor
    */
@@ -699,7 +702,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     //TODO set retry window
     submissionContext.setResource(Resource.newInstance(
         conf.getLong(KEY_AM_RESOURCE_MEM, DEFAULT_KEY_AM_RESOURCE_MEM), 1));
-    submissionContext.setQueue(conf.get(KEY_YARN_QUEUE, DEFAULT_YARN_QUEUE));
+    submissionContext.setQueue(conf.get(KEY_YARN_QUEUE, app.getQueue()));
     submissionContext.setApplicationName(appName);
     submissionContext.setApplicationType(SliderKeys.APP_TYPE);
     Set<String> appTags =
@@ -1725,9 +1728,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
           "Application " + appName + " doesn't exist in RM.");
     }
 
-    if (app.getYarnApplicationState().ordinal() >= YarnApplicationState.FINISHED
-        .ordinal()) {
-      log.info("Application {} is in a terminated state {}", appName,
+    if (terminatedStates.contains(app.getYarnApplicationState())) {
+      log.info("Application {} is already in a terminated state {}", appName,
           app.getYarnApplicationState());
       return EXIT_SUCCESS;
     }
@@ -1738,8 +1740,30 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
           Messages.StopClusterRequestProto.newBuilder()
               .setMessage(freezeArgs.message).build();
       appMaster.stopCluster(r);
-      log.info("Application " + appName + " is gracefully stopped.");
-    } catch (IOException | YarnException e){
+      log.info("Application " + appName + " is being gracefully stopped...");
+      long startTime = System.currentTimeMillis();
+      int pollCount = 0;
+      while (true) {
+        Thread.sleep(200);
+        ApplicationReport report =
+            yarnClient.getApplicationReport(app.getApplicationId());
+        if (terminatedStates.contains(report.getYarnApplicationState())) {
+          log.info("Application " + appName + " is stopped.");
+          break;
+        }
+        // kill after 10 seconds.
+        if ((System.currentTimeMillis() - startTime) > 10000) {
+          log.info("Stop operation timeout stopping, forcefully kill the app "
+              + appName);
+          yarnClient
+              .killApplication(app.getApplicationId(), freezeArgs.message);
+          break;
+        }
+        if (++pollCount % 10 == 0) {
+          log.info("Waiting for application " + appName + " to be stopped.");
+        }
+      }
+    } catch (IOException | YarnException | InterruptedException e) {
       log.info("Failed to stop " + appName
           + " gracefully, forcefully kill the app.");
       yarnClient.killApplication(app.getApplicationId(), freezeArgs.message);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[26/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
new file mode 100644
index 0000000..7d8f5a7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
@@ -0,0 +1,385 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.model.mock.MockPriority;
+import org.apache.slider.server.appmaster.model.mock.MockResource;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
+import org.apache.slider.server.appmaster.state.ContainerAllocationResults;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.server.appmaster.state.OutstandingRequestTracker;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test outstanding request tracker.
+ */
+public class TestRoleHistoryOutstandingRequestTracker extends
+    BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryOutstandingRequestTracker.class);
+
+  public static final String WORKERS_LABEL = "workers";
+  private NodeInstance host1 = new NodeInstance("host1", 3);
+  private NodeInstance host2 = new NodeInstance("host2", 3);
+  private MockResource resource = factory.newResource(48, 1);
+
+  private OutstandingRequestTracker tracker = new OutstandingRequestTracker();
+
+  public static final String WORKER = "worker";
+
+  @Override
+  public Application buildApplication() {
+    Application application = super.buildApplication();
+    Component component = new Component().name("worker").numberOfContainers(0L);
+    component.getConfiguration().setProperty(ResourceKeys.YARN_LABEL_EXPRESSION,
+        WORKERS_LABEL);
+    application.getComponents().add(component);
+    return application;
+  }
+
+  @Test
+  public void testAddRetrieveEntry() throws Throwable {
+    OutstandingRequest request = tracker.newRequest(host1, 0);
+    assertEquals(tracker.lookupPlacedRequest(0, "host1"), request);
+    assertEquals(tracker.removePlacedRequest(request), request);
+    assertNull(tracker.lookupPlacedRequest(0, "host1"));
+  }
+
+  @Test
+  public void testAddCompleteEntry() throws Throwable {
+    OutstandingRequest req1 = tracker.newRequest(host1, 0);
+    req1.buildContainerRequest(resource, getRole0Status(), 0);
+
+    tracker.newRequest(host2, 0).buildContainerRequest(resource,
+        getRole0Status(), 0);
+    tracker.newRequest(host1, 1).buildContainerRequest(resource,
+        getRole0Status(), 0);
+
+    ContainerAllocationResults allocation = tracker.onContainerAllocated(1,
+        "host1", null);
+    assertEquals(allocation.outcome, ContainerAllocationOutcome.Placed);
+    assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
+
+    assertNull(tracker.lookupPlacedRequest(1, "host1"));
+    assertNotNull(tracker.lookupPlacedRequest(0, "host1"));
+  }
+
+  @Test
+  public void testResetOpenRequests() throws Throwable {
+    OutstandingRequest req1 = tracker.newRequest(null, 0);
+    assertFalse(req1.isLocated());
+    tracker.newRequest(host1, 0);
+    List<OutstandingRequest> openRequests = tracker.listOpenRequests();
+    assertEquals(1, openRequests.size());
+    tracker.resetOutstandingRequests(0);
+    assertTrue(tracker.listOpenRequests().isEmpty());
+    assertTrue(tracker.listPlacedRequests().isEmpty());
+  }
+
+  @Test
+  public void testRemoveOpenRequestUnissued() throws Throwable {
+    OutstandingRequest req1 = tracker.newRequest(null, 0);
+    req1.buildContainerRequest(resource, getRole0Status(), 0);
+    assertEquals(1, tracker.listOpenRequests().size());
+    MockContainer c1 = factory.newContainer(null, new MockPriority(0));
+    c1.setResource(resource);
+
+    ContainerAllocationResults allocation =
+        tracker.onContainerAllocated(0, "host1", c1);
+    ContainerAllocationOutcome outcome = allocation.outcome;
+    assertEquals(outcome, ContainerAllocationOutcome.Unallocated);
+    assertTrue(allocation.operations.isEmpty());
+    assertEquals(1, tracker.listOpenRequests().size());
+  }
+
+  @Test
+  public void testIssuedOpenRequest() throws Throwable {
+    OutstandingRequest req1 = tracker.newRequest(null, 0);
+    req1.buildContainerRequest(resource, getRole0Status(), 0);
+    assertEquals(1, tracker.listOpenRequests().size());
+
+    int pri = ContainerPriority.buildPriority(0, false);
+    assertTrue(pri > 0);
+    MockNodeId nodeId = factory.newNodeId("hostname-1");
+    MockContainer c1 = factory.newContainer(nodeId, new MockPriority(pri));
+
+    c1.setResource(resource);
+
+    ContainerRequest issued = req1.getIssuedRequest();
+    assertEquals(issued.getCapability(), resource);
+    assertEquals(issued.getPriority().getPriority(), c1.getPriority()
+        .getPriority());
+    assertTrue(req1.resourceRequirementsMatch(resource));
+
+    ContainerAllocationResults allocation =
+        tracker.onContainerAllocated(0, nodeId.getHost(), c1);
+    assertEquals(0, tracker.listOpenRequests().size());
+    assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
+
+    assertEquals(allocation.outcome, ContainerAllocationOutcome.Open);
+    assertEquals(allocation.origin, req1);
+  }
+
+  @Test
+  public void testResetEntries() throws Throwable {
+    tracker.newRequest(host1, 0);
+    tracker.newRequest(host2, 0);
+    tracker.newRequest(host1, 1);
+    List<NodeInstance> canceled = tracker.resetOutstandingRequests(0);
+    assertEquals(2, canceled.size());
+    assertTrue(canceled.contains(host1));
+    assertTrue(canceled.contains(host2));
+    assertNotNull(tracker.lookupPlacedRequest(1, "host1"));
+    assertNull(tracker.lookupPlacedRequest(0, "host1"));
+    canceled = tracker.resetOutstandingRequests(0);
+    assertEquals(0, canceled.size());
+    assertEquals(1, tracker.resetOutstandingRequests(1).size());
+  }
+
+  @Test
+  public void testEscalation() throws Throwable {
+    // first request: default placement
+    assertEquals(getRole0Status().getPlacementPolicy(), PlacementPolicy
+        .DEFAULT);
+    Resource res0 = newResource(getRole0Status());
+    OutstandingRequest outstanding0 = tracker.newRequest(host1,
+        getRole0Status().getKey());
+    ContainerRequest initialRequest =
+        outstanding0.buildContainerRequest(res0, getRole0Status(), 0);
+    assertNotNull(outstanding0.getIssuedRequest());
+    assertTrue(outstanding0.isLocated());
+    assertFalse(outstanding0.isEscalated());
+    assertFalse(initialRequest.getRelaxLocality());
+    assertEquals(1, tracker.listPlacedRequests().size());
+
+    // second. This one doesn't get launched. This is to verify that the
+    // escalation process skips entries which are in the list but have not
+    // been issued, which can be a race condition between request issuance &
+    // escalation.
+    // (not one observed outside test authoring, but retained for completeness)
+    Resource res2 = newResource(getRole2Status());
+    OutstandingRequest outstanding2 = tracker.newRequest(host1,
+        getRole2Status().getKey());
+
+    // simulate some time escalation of role 1 MUST now be triggered
+    long interval = getRole0Status().getPlacementTimeoutSeconds() * 1000 + 500;
+    long now = interval;
+    final List<AbstractRMOperation> escalations = tracker
+        .escalateOutstandingRequests(now);
+
+    assertTrue(outstanding0.isEscalated());
+    assertFalse(outstanding2.isEscalated());
+
+    // two entries
+    assertEquals(2, escalations.size());
+    AbstractRMOperation e1 = escalations.get(0);
+    assertTrue(e1 instanceof CancelSingleRequest);
+    final CancelSingleRequest cancel = (CancelSingleRequest) e1;
+    assertEquals(initialRequest, cancel.getRequest());
+    AbstractRMOperation e2 = escalations.get(1);
+    assertTrue(e2 instanceof ContainerRequestOperation);
+    ContainerRequestOperation escRequest = (ContainerRequestOperation) e2;
+    assertTrue(escRequest.getRequest().getRelaxLocality());
+
+    // build that second request from an anti-affine entry
+    // these get placed as well
+    now += interval;
+    ContainerRequest containerReq2 =
+        outstanding2.buildContainerRequest(res2, getRole2Status(), now);
+    // escalate a little bit more
+    final List<AbstractRMOperation> escalations2 = tracker
+        .escalateOutstandingRequests(now);
+    // and expect no new entries
+    assertEquals(0, escalations2.size());
+
+    // go past the role2 timeout
+    now += getRole2Status().getPlacementTimeoutSeconds() * 1000 + 500;
+    // escalate a little bit more
+    final List<AbstractRMOperation> escalations3 = tracker
+        .escalateOutstandingRequests(now);
+    // and expect another escalation
+    assertEquals(2, escalations3.size());
+    assertTrue(outstanding2.isEscalated());
+
+    // finally add a strict entry to the mix
+    Resource res3 = newResource(getRole1Status());
+    OutstandingRequest outstanding3 = tracker.newRequest(host1,
+        getRole1Status().getKey());
+
+    final ProviderRole providerRole1 = getRole1Status().getProviderRole();
+    assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT);
+    now += interval;
+    assertFalse(outstanding3.mayEscalate());
+    final List<AbstractRMOperation> escalations4 = tracker
+        .escalateOutstandingRequests(now);
+    assertTrue(escalations4.isEmpty());
+
+  }
+
+  /**
+   * If the placement does include a label, the initial request must
+   * <i>not</i> include it.
+   * The escalation request will contain the label, while
+   * leaving out the node list and setting relaxLocality==true.
+   * @throws Throwable
+   */
+  @Test
+  public void testRequestLabelledPlacement() throws Throwable {
+    NodeInstance ni = new NodeInstance("host1", 0);
+    OutstandingRequest req1 = tracker.newRequest(ni, 0);
+    Resource res0 = factory.newResource(48, 1);
+
+    RoleStatus workerRole = lookupRole(WORKER);
+    // initial request
+    ContainerRequest yarnRequest =
+        req1.buildContainerRequest(res0, workerRole, 0);
+    assertEquals(req1.label, WORKERS_LABEL);
+
+    assertNull(yarnRequest.getNodeLabelExpression());
+    assertFalse(yarnRequest.getRelaxLocality());
+    // escalation
+    ContainerRequest yarnRequest2 = req1.escalate();
+    assertNull(yarnRequest2.getNodes());
+    assertTrue(yarnRequest2.getRelaxLocality());
+    assertEquals(yarnRequest2.getNodeLabelExpression(), WORKERS_LABEL);
+  }
+
+  /**
+   * If the placement doesn't include a label, then the escalation request
+   * retains the node list, but sets relaxLocality==true.
+   * @throws Throwable
+   */
+  @Test
+  public void testRequestUnlabelledPlacement() throws Throwable {
+    NodeInstance ni = new NodeInstance("host1", 0);
+    OutstandingRequest req1 = tracker.newRequest(ni, 0);
+    Resource res0 = factory.newResource(48, 1);
+
+    // initial request
+    ContainerRequest yarnRequest = req1.buildContainerRequest(res0,
+        getRole0Status(), 0);
+    assertNotNull(yarnRequest.getNodes());
+    assertTrue(SliderUtils.isUnset(yarnRequest.getNodeLabelExpression()));
+    assertFalse(yarnRequest.getRelaxLocality());
+    ContainerRequest yarnRequest2 = req1.escalate();
+    assertNotNull(yarnRequest2.getNodes());
+    assertTrue(yarnRequest2.getRelaxLocality());
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testAARequestNoNodes() throws Throwable {
+    tracker.newAARequest(getRole0Status().getKey(), new ArrayList<>(), "");
+  }
+
+  @Test
+  public void testAARequest() throws Throwable {
+    int role0 = getRole0Status().getKey();
+    OutstandingRequest request = tracker.newAARequest(role0, Arrays
+        .asList(host1), "");
+    assertEquals(host1.hostname, request.hostname);
+    assertFalse(request.isLocated());
+  }
+
+  @Test
+  public void testAARequestPair() throws Throwable {
+    int role0 = getRole0Status().getKey();
+    OutstandingRequest request = tracker.newAARequest(role0, Arrays.asList(
+        host1, host2), "");
+    assertEquals(host1.hostname, request.hostname);
+    assertFalse(request.isLocated());
+    ContainerRequest yarnRequest = request.buildContainerRequest(
+        getRole0Status().copyResourceRequirements(new MockResource(0, 0)),
+        getRole0Status(),
+        0);
+    assertFalse(yarnRequest.getRelaxLocality());
+    assertFalse(request.mayEscalate());
+
+    assertEquals(2, yarnRequest.getNodes().size());
+  }
+
+  @Test
+  public void testBuildResourceRequirements() throws Throwable {
+    // Store original values
+    Application application = appState.getClusterStatus();
+    Component role0 = application.getComponent(getRole0Status().getGroup());
+    String origMem = role0.getResource().getMemory();
+    Integer origVcores = role0.getResource().getCpus();
+
+    // Resource values to be used for this test
+    int testMem = 32768;
+    int testVcores = 2;
+    role0.resource(new org.apache.slider.api.resource.Resource().memory(Integer
+        .toString(testMem)).cpus(testVcores));
+
+    // Test normalization disabled
+    LOG.info("Test normalization: disabled");
+    role0.getConfiguration().setProperty(
+        ResourceKeys.YARN_RESOURCE_NORMALIZATION_ENABLED, "false");
+    MockResource requestedRes = new MockResource(testMem, testVcores);
+    MockResource expectedRes = new MockResource(testMem, testVcores);
+    LOG.info("Resource requested: {}", requestedRes);
+    Resource resFinal = appState.buildResourceRequirements(getRole0Status());
+    LOG.info("Resource actual: {}", resFinal);
+    assertTrue(Resources.equals(expectedRes, resFinal));
+
+    // Test normalization enabled
+    LOG.info("Test normalization: enabled");
+    role0.getConfiguration().setProperty(
+        ResourceKeys.YARN_RESOURCE_NORMALIZATION_ENABLED, "true");
+    expectedRes = new MockResource(MockAppState.RM_MAX_RAM, testVcores);
+    LOG.info("Resource requested: {}", requestedRes);
+    resFinal = appState.buildResourceRequirements(getRole0Status());
+    LOG.info("Resource actual: {}", resFinal);
+    assertTrue(Resources.equals(expectedRes, resFinal));
+
+    // revert resource configuration to original value
+    role0.resource(new org.apache.slider.api.resource.Resource().memory(origMem)
+        .cpus(origVcores));
+  }
+
+  public Resource newResource(RoleStatus r) {
+    return appState.buildResourceRequirements(r);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java
new file mode 100644
index 0000000..a936df5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java
@@ -0,0 +1,371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.avro.LoadedRoleHistory;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test role history reading and writing.
+ */
+public class TestRoleHistoryRW extends BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryRW.class);
+
+  private static long time = System.currentTimeMillis();
+  public static final String HISTORY_V1_6_ROLE =
+      "org/apache/slider/server/avro/history-v01-6-role.json";
+  public static final String HISTORY_V1_3_ROLE =
+      "org/apache/slider/server/avro/history-v01-3-role.json";
+  public static final String HISTORY_V1B_1_ROLE =
+      "org/apache/slider/server/avro/history_v01b_1_role.json";
+
+  private RoleStatus role0Status;
+  private RoleStatus role1Status;
+
+  static final ProviderRole PROVIDER_ROLE3 = new ProviderRole(
+      "role3",
+      3,
+      PlacementPolicy.STRICT,
+      3,
+      3,
+      ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+
+  @Override
+  public String getTestName() {
+    return "TestHistoryRW";
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    role0Status = getRole0Status();
+    role1Status = getRole1Status();
+  }
+
+  @Test
+  public void testWriteReadEmpty() throws Throwable {
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    roleHistory.onStart(fs, historyPath);
+    Path history = roleHistory.saveHistory(time++);
+    assertTrue(fs.getFileStatus(history).isFile());
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    historyWriter.read(fs, history);
+  }
+
+  @Test
+  public void testWriteReadData() throws Throwable {
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertFalse(roleHistory.onStart(fs, historyPath));
+    String addr = "localhost";
+    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry ne1 = instance.getOrCreate(0);
+    ne1.setLastUsed(0xf00d);
+
+    Path history = roleHistory.saveHistory(time++);
+    assertTrue(fs.getFileStatus(history).isFile());
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+
+
+    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+    assertTrue(0 < loadedRoleHistory.size());
+    rh2.rebuild(loadedRoleHistory);
+    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+    assertNotNull(ni2);
+    NodeEntry ne2 = ni2.get(0);
+    assertNotNull(ne2);
+    assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
+  }
+
+  @Test
+  public void testWriteReadActiveData() throws Throwable {
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    roleHistory.onStart(fs, historyPath);
+    String addr = "localhost";
+    String addr2 = "rack1server5";
+    NodeInstance localhost = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry orig1 = localhost.getOrCreate(role0Status.getKey());
+    orig1.setLastUsed(0x10);
+    NodeInstance rack1server5 = roleHistory.getOrCreateNodeInstance(addr2);
+    NodeEntry orig2 = rack1server5.getOrCreate(role1Status.getKey());
+    orig2.setLive(3);
+    assertFalse(orig2.isAvailable());
+    NodeEntry orig3 = localhost.getOrCreate(role1Status.getKey());
+    orig3.setLastUsed(0x20);
+    orig3.setLive(1);
+    assertFalse(orig3.isAvailable());
+    orig3.release();
+    assertTrue(orig3.isAvailable());
+    roleHistory.dump();
+
+    long savetime = 0x0001000;
+    Path history = roleHistory.saveHistory(savetime);
+    assertTrue(fs.getFileStatus(history).isFile());
+    describe("Loaded");
+    LOG.info("testWriteReadActiveData in {}", history);
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+    LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+    assertEquals(3, loadedRoleHistory.size());
+    rh2.rebuild(loadedRoleHistory);
+    rh2.dump();
+
+    assertEquals(2, rh2.getClusterSize());
+    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+    assertNotNull(ni2);
+    NodeEntry loadedNE = ni2.get(role0Status.getKey());
+    assertEquals(loadedNE.getLastUsed(), orig1.getLastUsed());
+    NodeInstance ni2b = rh2.getExistingNodeInstance(addr2);
+    assertNotNull(ni2b);
+    NodeEntry loadedNE2 = ni2b.get(role1Status.getKey());
+    assertNotNull(loadedNE2);
+    assertEquals(loadedNE2.getLastUsed(), savetime);
+    assertEquals(rh2.getThawedDataTime(), savetime);
+
+    // now start it
+    rh2.buildRecentNodeLists();
+    describe("starting");
+    rh2.dump();
+    List<NodeInstance> available0 = rh2.cloneRecentNodeList(role0Status
+        .getKey());
+    assertEquals(1, available0.size());
+
+    NodeInstance entry = available0.get(0);
+    assertEquals(entry.hostname, "localhost");
+    assertEquals(entry, localhost);
+    List<NodeInstance> available1 = rh2.cloneRecentNodeList(role1Status
+        .getKey());
+    assertEquals(2, available1.size());
+    //and verify that even if last used was set, the save time is picked up
+    assertEquals(entry.get(role1Status.getKey()).getLastUsed(), roleHistory
+        .getSaveTime());
+
+  }
+
+  @Test
+  public void testWriteThaw() throws Throwable {
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertFalse(roleHistory.onStart(fs, historyPath));
+    String addr = "localhost";
+    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry ne1 = instance.getOrCreate(0);
+    ne1.setLastUsed(0xf00d);
+
+    Path history = roleHistory.saveHistory(time++);
+    long savetime =roleHistory.getSaveTime();
+    assertTrue(fs.getFileStatus(history).isFile());
+    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+    assertTrue(rh2.onStart(fs, historyPath));
+    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+    assertNotNull(ni2);
+    NodeEntry ne2 = ni2.get(0);
+    assertNotNull(ne2);
+    assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
+    assertEquals(rh2.getThawedDataTime(), savetime);
+  }
+
+
+  @Test
+  public void testPurgeOlderEntries() throws Throwable {
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    time = 1;
+    Path file1 = touch(historyWriter, time++);
+    Path file2 = touch(historyWriter, time++);
+    Path file3 = touch(historyWriter, time++);
+    Path file4 = touch(historyWriter, time++);
+    Path file5 = touch(historyWriter, time++);
+    Path file6 = touch(historyWriter, time++);
+
+    assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file1));
+    assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file2));
+    assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file2));
+    assertEquals(3, historyWriter.purgeOlderHistoryEntries(fs, file5));
+    assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file6));
+    try {
+      // make an impossible assertion that will fail if the method
+      // actually completes
+      assertEquals(-1, historyWriter.purgeOlderHistoryEntries(fs, file1));
+    } catch (FileNotFoundException ignored) {
+      //  expected
+    }
+
+  }
+
+  public Path touch(RoleHistoryWriter historyWriter, long timeMs)
+      throws IOException {
+    Path path = historyWriter.createHistoryFilename(historyPath, timeMs);
+    FSDataOutputStream out = fs.create(path);
+    out.close();
+    return path;
+  }
+
+  @Test
+  public void testSkipEmptyFileOnRead() throws Throwable {
+    describe("verify that empty histories are skipped on read; old histories " +
+            "purged");
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    roleHistory.onStart(fs, historyPath);
+    time = 0;
+    Path oldhistory = roleHistory.saveHistory(time++);
+
+    String addr = "localhost";
+    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry ne1 = instance.getOrCreate(0);
+    ne1.setLastUsed(0xf00d);
+
+    Path goodhistory = roleHistory.saveHistory(time++);
+
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    Path touched = touch(historyWriter, time++);
+
+    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+    assertTrue(rh2.onStart(fs, historyPath));
+    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+    assertNotNull(ni2);
+
+    //and assert the older file got purged
+    assertFalse(fs.exists(oldhistory));
+    assertTrue(fs.exists(goodhistory));
+    assertTrue(fs.exists(touched));
+  }
+
+  @Test
+  public void testSkipBrokenFileOnRead() throws Throwable {
+    describe("verify that broken histories are skipped on read; old " +
+            "histories purged");
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    roleHistory.onStart(fs, historyPath);
+    time = 0;
+    Path oldhistory = roleHistory.saveHistory(time++);
+
+    String addr = "localhost";
+    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry ne1 = instance.getOrCreate(0);
+    ne1.setLastUsed(0xf00d);
+
+    Path goodhistory = roleHistory.saveHistory(time++);
+
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+    Path badfile = historyWriter.createHistoryFilename(historyPath, time++);
+    FSDataOutputStream out = fs.create(badfile);
+    out.writeBytes("{broken:true}");
+    out.close();
+
+    RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+    describe("IGNORE STACK TRACE BELOW");
+
+    assertTrue(rh2.onStart(fs, historyPath));
+
+    describe("IGNORE STACK TRACE ABOVE");
+    NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+    assertNotNull(ni2);
+
+    //and assert the older file got purged
+    assertFalse(fs.exists(oldhistory));
+    assertTrue(fs.exists(goodhistory));
+    assertTrue(fs.exists(badfile));
+  }
+
+  /**
+   * Test that a v1 JSON file can be read. Here the number of roles
+   * matches the current state.
+   * @throws Throwable
+   */
+  @Test
+  public void testReloadDataV13Role() throws Throwable {
+    String source = HISTORY_V1_3_ROLE;
+    RoleHistoryWriter writer = new RoleHistoryWriter();
+
+    LoadedRoleHistory loadedRoleHistory = writer.read(source);
+    assertEquals(4, loadedRoleHistory.size());
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+  }
+
+  /**
+   * Test that a v1 JSON file can be read. Here more roles than expected
+   * @throws Throwable
+   */
+  @Test
+  public void testReloadDataV16Role() throws Throwable {
+    String source = HISTORY_V1_6_ROLE;
+    RoleHistoryWriter writer = new RoleHistoryWriter();
+
+    LoadedRoleHistory loadedRoleHistory = writer.read(source);
+    assertEquals(6, loadedRoleHistory.size());
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertEquals(3, roleHistory.rebuild(loadedRoleHistory));
+  }
+
+  /**
+   * Test that a v1 JSON file can be read. Here the number of roles
+   * is less than the current state.
+   * @throws Throwable
+   */
+  @Test
+  public void testReloadLessRoles() throws Throwable {
+    String source = HISTORY_V1_3_ROLE;
+    RoleHistoryWriter writer = new RoleHistoryWriter();
+
+    LoadedRoleHistory loadedRoleHistory = writer.read(source);
+    assertEquals(4, loadedRoleHistory.size());
+    // diamond operator avoids the raw-type unchecked warning
+    List<ProviderRole> expandedRoles = new ArrayList<>(MockFactory.ROLES);
+    expandedRoles.add(PROVIDER_ROLE3);
+    RoleHistory roleHistory = new MockRoleHistory(expandedRoles);
+    assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+  }
+
+  /**
+   * Test that a v1b JSON file can be read. Here more roles than expected
+   * @throws Throwable
+   */
+  @Test
+  public void testReloadDataV1B1Role() throws Throwable {
+    String source = HISTORY_V1B_1_ROLE;
+    RoleHistoryWriter writer = new RoleHistoryWriter();
+
+    LoadedRoleHistory loadedRoleHistory = writer.read(source);
+    assertEquals(1, loadedRoleHistory.size());
+    assertEquals(2, loadedRoleHistory.roleMap.size());
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java
new file mode 100644
index 0000000..0bc2282
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.avro.NewerFilesFirst;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Test role history rw ordering.
+ */
+public class TestRoleHistoryRWOrdering extends BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryRWOrdering.class);
+
+  private List<Path> paths = pathlist(
+      Arrays.asList(
+        "hdfs://localhost/history-0406c.json",
+        "hdfs://localhost/history-5fffa.json",
+        "hdfs://localhost/history-0001a.json",
+        "hdfs://localhost/history-0001f.json"
+      )
+  );
+  private Path h0406c = paths.get(0);
+  private Path h5fffa = paths.get(1);
+  private Path h0001a = paths.get(3);
+
+  public TestRoleHistoryRWOrdering() throws URISyntaxException {
+  }
+
+  List<Path> pathlist(List<String> pathnames) throws URISyntaxException {
+    List<Path> pathList = new ArrayList<>();
+    for (String p : pathnames) {
+      pathList.add(new Path(new URI(p)));
+    }
+    return pathList;
+  }
+
+  @Override
+  public String getTestName() {
+    return "TestHistoryRWOrdering";
+  }
+
+  /**
+   * This tests regexp pattern matching. It uses the current time so isn't
+   * repeatable -but it does test a wider range of values in the process
+   * @throws Throwable
+   */
+  @Test
+  public void testPatternRoundTrip() throws Throwable {
+    describe("test pattern matching of names");
+    long value = System.currentTimeMillis();
+    String name = String.format(SliderKeys.HISTORY_FILENAME_CREATION_PATTERN,
+        value);
+    String matchpattern = SliderKeys.HISTORY_FILENAME_MATCH_PATTERN;
+    Pattern pattern = Pattern.compile(matchpattern);
+    Matcher matcher = pattern.matcher(name);
+    if (!matcher.find()) {
+      // Java does not interpolate "$var" — build the message explicitly
+      throw new Exception("No match for " + matchpattern + " in " + name);
+    }
+  }
+
+  @Test
+  public void testWriteSequenceReadData() throws Throwable {
+    describe("test that if multiple entries are written, the newest is picked" +
+        " up");
+    long time = System.currentTimeMillis();
+
+    RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+    assertFalse(roleHistory.onStart(fs, historyPath));
+    String addr = "localhost";
+    NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+    NodeEntry ne1 = instance.getOrCreate(0);
+    ne1.setLastUsed(0xf00d);
+
+    Path history1 = roleHistory.saveHistory(time++);
+    Path history2 = roleHistory.saveHistory(time++);
+    Path history3 = roleHistory.saveHistory(time);
+
+    //inject a later file with a different name
+    sliderFileSystem.cat(new Path(historyPath, "file.json"), true, "hello," +
+        " world");
+
+
+    RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+
+    List<Path> entries = historyWriter.findAllHistoryEntries(
+        fs,
+        historyPath,
+        false);
+    assertEquals(entries.size(), 3);
+    assertEquals(entries.get(0), history3);
+    assertEquals(entries.get(1), history2);
+    assertEquals(entries.get(2), history1);
+  }
+
+  @Test
+  public void testPathStructure() throws Throwable {
+    assertEquals(h5fffa.getName(), "history-5fffa.json");
+  }
+
+  @Test
+  public void testPathnameComparator() throws Throwable {
+
+    NewerFilesFirst newerName = new NewerFilesFirst();
+
+    LOG.info("{} name is {}", h5fffa, h5fffa.getName());
+    LOG.info("{} name is {}", h0406c, h0406c.getName());
+    assertEquals(newerName.compare(h5fffa, h5fffa), 0);
+    assertTrue(newerName.compare(h5fffa, h0406c) < 0);
+    assertTrue(newerName.compare(h5fffa, h0001a) < 0);
+    assertTrue(newerName.compare(h0001a, h5fffa) > 0);
+
+  }
+
+  @Test
+  public void testPathSort() throws Throwable {
+    List<Path> paths2 = new ArrayList<>(paths);
+    RoleHistoryWriter.sortHistoryPaths(paths2);
+    assertListEquals(paths2,
+                     Arrays.asList(
+                       paths.get(1),
+                       paths.get(0),
+                       paths.get(3),
+                       paths.get(2)
+                     ));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java
new file mode 100644
index 0000000..7364201
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test the RH availability list and request tracking: that hosts
+ * get removed and added.
+ */
+public class TestRoleHistoryRequestTracking extends BaseMockAppStateTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRoleHistoryRequestTracking.class);
+
+  private String roleName = "test";
+
+  private NodeInstance age1Active4;
+  private NodeInstance age2Active2;
+  private NodeInstance age2Active0;
+  private NodeInstance age3Active0;
+  private NodeInstance age4Active1;
+
+  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+  // 1MB, 1 vcore
+  private Resource resource = Resource.newInstance(1, 1);
+
+  private RoleStatus roleStatus;
+
+  public TestRoleHistoryRequestTracking() throws BadConfigException {
+  }
+
+  AMRMClient.ContainerRequest requestContainer(RoleStatus rs) {
+    return roleHistory.requestContainerForRole(rs).getIssuedRequest();
+  }
+
+  @Override
+  public String getTestName() {
+    return "TestRoleHistoryAvailableList";
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+
+    age1Active4 = nodeInstance(1, 4, 0, 0);
+    age2Active2 = nodeInstance(2, 2, 0, 1);
+    age2Active0 = nodeInstance(2, 0, 0, 0);
+    age3Active0 = nodeInstance(3, 0, 0, 0);
+    age4Active1 = nodeInstance(4, 1, 0, 0);
+
+    roleHistory.insert(Arrays.asList(age2Active2, age2Active0, age4Active1,
+        age1Active4, age3Active0));
+    roleHistory.buildRecentNodeLists();
+    roleStatus = getRole0Status();
+    roleStatus.setResourceRequirements(Resource.newInstance(1, 1));
+  }
+
+  @Test
+  public void testAvailableListBuiltForRoles() throws Throwable {
+    List<NodeInstance> available0 = roleHistory.cloneRecentNodeList(
+        roleStatus.getKey());
+    assertListEquals(Arrays.asList(age3Active0, age2Active0), available0);
+  }
+
+  @Test
+  public void testRequestedNodeOffList() throws Throwable {
+    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+    assertEquals(age3Active0, ni);
+    assertListEquals(Arrays.asList(age2Active0),
+        roleHistory.cloneRecentNodeList(roleStatus.getKey()));
+    roleHistory.requestInstanceOnNode(ni,
+        roleStatus,
+        resource
+    );
+  }
+
+  @Test
+  public void testRequestedNodeOffListWithFailures() throws Throwable {
+    assertFalse(roleHistory.cloneRecentNodeList(roleStatus.getKey()).isEmpty());
+
+    NodeEntry age3role0 = recordAsFailed(age3Active0, roleStatus.getKey(), 4);
+    assertTrue(age3Active0.isConsideredUnreliable(roleStatus.getKey(),
+        roleStatus.getNodeFailureThreshold()));
+    recordAsFailed(age2Active0, roleStatus.getKey(), 4);
+    assertTrue(age2Active0.isConsideredUnreliable(roleStatus.getKey(),
+        roleStatus.getNodeFailureThreshold()));
+    // expect to get a null node back
+    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+    assertNull(ni);
+
+    // which is translated to a no-location request
+    AMRMClient.ContainerRequest req = roleHistory.requestInstanceOnNode(ni,
+        roleStatus,
+        resource).getIssuedRequest();
+
+    assertNull(req.getNodes());
+
+    LOG.info("resetting failure count");
+    age3role0.resetFailedRecently();
+    roleHistory.dump();
+    assertEquals(0, age3role0.getFailedRecently());
+    assertFalse(age3Active0.isConsideredUnreliable(roleStatus.getKey(),
+        roleStatus.getNodeFailureThreshold()));
+    assertFalse(roleHistory.cloneRecentNodeList(roleStatus.getKey()).isEmpty());
+    // looking for a node should now find one
+    ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+    assertEquals(ni, age3Active0);
+    req = roleHistory.requestInstanceOnNode(ni, roleStatus, resource)
+        .getIssuedRequest();
+    assertEquals(1, req.getNodes().size());
+  }
+
+  /**
+   * Verify that strict placement policies generate requests for nodes
+   * irrespective of their failed status.
+   * @throws Throwable
+   */
+  @Test
+  public void testStrictPlacementIgnoresFailures() throws Throwable {
+
+    RoleStatus targetRole = getRole1Status();
+    final ProviderRole providerRole1 = targetRole.getProviderRole();
+    assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT);
+    int key1 = targetRole.getKey();
+    int key0 = getRole0Status().getKey();
+
+    List<NodeInstance> nodes0 = Arrays.asList(age1Active4, age2Active0,
+        age2Active2, age3Active0, age4Active1);
+    recordAllFailed(key0, 4, nodes0);
+    recordAllFailed(key1, 4, nodes0);
+
+    // trigger a list rebuild
+    roleHistory.buildRecentNodeLists();
+    List<NodeInstance> recentRole0 = roleHistory.cloneRecentNodeList(key0);
+    assertTrue(recentRole0.indexOf(age3Active0) < recentRole0
+        .indexOf(age2Active0));
+
+    // the non-strict role has no suitable nodes
+    assertNull(roleHistory.findRecentNodeForNewInstance(getRole0Status()));
+
+
+    NodeInstance ni = roleHistory.findRecentNodeForNewInstance(targetRole);
+    assertNotNull(ni);
+
+    NodeInstance ni2 = roleHistory.findRecentNodeForNewInstance(targetRole);
+    assertNotNull(ni2);
+    assertNotEquals(ni, ni2);
+  }
+
+  @Test
+  public void testFindAndRequestNode() throws Throwable {
+    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+
+    assertEquals(age3Active0.hostname, req.getNodes().get(0));
+    List<NodeInstance> a2 = roleHistory.cloneRecentNodeList(roleStatus
+        .getKey());
+    assertListEquals(Arrays.asList(age2Active0), a2);
+  }
+
+  @Test
+  public void testRequestedNodeIntoReqList() throws Throwable {
+    requestContainer(roleStatus);
+    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
+    assertEquals(1, requests.size());
+    assertEquals(age3Active0.hostname, requests.get(0).hostname);
+  }
+
+  @Test
+  public void testCompletedRequestDropsNode() throws Throwable {
+    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
+    assertEquals(1, requests.size());
+    String hostname = requests.get(0).hostname;
+    assertEquals(age3Active0.hostname, hostname);
+    assertEquals(hostname, req.getNodes().get(0));
+    MockContainer container = factory.newContainer(req, hostname);
+    assertOnContainerAllocated(container, 2, 1);
+    assertNoOutstandingPlacedRequests();
+  }
+
+  public void assertOnContainerAllocated(Container c1, int p1, int p2) {
+    assertNotEquals(ContainerAllocationOutcome.Open, roleHistory
+        .onContainerAllocated(c1, p1, p2).outcome);
+  }
+
+  public void assertOnContainerAllocationOpen(Container c1, int p1, int p2) {
+    assertEquals(ContainerAllocationOutcome.Open, roleHistory
+        .onContainerAllocated(c1, p1, p2).outcome);
+  }
+
+  void assertNoOutstandingPlacedRequests() {
+    assertTrue(roleHistory.listPlacedRequests().isEmpty());
+  }
+
+  public void assertOutstandingPlacedRequests(int i) {
+    assertEquals(i, roleHistory.listPlacedRequests().size());
+  }
+
+  @Test
+  public void testTwoRequests() throws Throwable {
+    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
+    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
+    assertEquals(2, requests.size());
+    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
+    assertOnContainerAllocated(container, 2, 1);
+    assertOutstandingPlacedRequests(1);
+    container = factory.newContainer(req2, req2.getNodes().get(0));
+    assertOnContainerAllocated(container, 2, 2);
+    assertNoOutstandingPlacedRequests();
+  }
+
+  @Test
+  public void testThreeRequestsOneUnsatisified() throws Throwable {
+    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
+    AMRMClient.ContainerRequest req3 = requestContainer(roleStatus);
+    List<OutstandingRequest> requests = roleHistory.listPlacedRequests();
+    assertEquals(2, requests.size());
+    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
+    assertOnContainerAllocated(container, 2, 1);
+    assertOutstandingPlacedRequests(1);
+
+    container = factory.newContainer(req3, "three");
+    assertOnContainerAllocationOpen(container, 3, 2);
+    assertOutstandingPlacedRequests(1);
+
+    // the final allocation will trigger a cleanup
+    container = factory.newContainer(req2, "four");
+    // no node dropped
+    assertEquals(ContainerAllocationOutcome.Unallocated,
+           roleHistory.onContainerAllocated(container, 3, 3).outcome);
+    // yet the list is now empty
+    assertNoOutstandingPlacedRequests();
+    roleHistory.listOpenRequests().isEmpty();
+
+    // and the remainder goes onto the available list
+    List<NodeInstance> a2 = roleHistory.cloneRecentNodeList(roleStatus
+        .getKey());
+    assertListEquals(Arrays.asList(age2Active0), a2);
+  }
+
+  @Test
+  public void testThreeRequests() throws Throwable {
+    AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+    AMRMClient.ContainerRequest req2 = requestContainer(roleStatus);
+    AMRMClient.ContainerRequest req3 = requestContainer(roleStatus);
+    assertOutstandingPlacedRequests(2);
+    assertNull(req3.getNodes());
+    MockContainer container = factory.newContainer(req, req.getNodes().get(0));
+    assertOnContainerAllocated(container, 3, 1);
+    assertOutstandingPlacedRequests(1);
+    container = factory.newContainer(req2, req2.getNodes().get(0));
+    assertOnContainerAllocated(container, 3, 2);
+    assertNoOutstandingPlacedRequests();
+    container = factory.newContainer(req3, "three");
+    assertOnContainerAllocationOpen(container, 3, 3);
+    assertNoOutstandingPlacedRequests();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java
new file mode 100644
index 0000000..33e7930
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.server.appmaster.actions.ResetFailureWindow;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAM;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.UpdateBlacklistOperation;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Test updating blacklist: verifies that a node crosses onto the role
+ * history blacklist once its failure count for a role exceeds
+ * {@link MockFactory#NODE_FAILURE_THRESHOLD}, that repeat updates are
+ * no-ops, and that resetting the failure window removes the node again.
+ */
+public class TestRoleHistoryUpdateBlacklist extends BaseMockAppStateTest {
+  private RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+  private Collection<RoleStatus> roleStatuses;
+  private RoleStatus roleStatus;
+  // the single node instance whose blacklist state is tracked
+  private NodeInstance ni;
+
+  // throws clause needed because MockRoleHistory construction can fail
+  public TestRoleHistoryUpdateBlacklist() throws BadConfigException {
+  }
+
+  @Override
+  public String getTestName() {
+    return "TestUpdateBlacklist";
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    // a single node, recently used by role 0, wired into the app state
+    ni = nodeInstance(1, 0, 0, 0);
+    roleHistory.insert(Arrays.asList(ni));
+    roleHistory.buildRecentNodeLists();
+    appState.setRoleHistory(roleHistory);
+    roleStatus = getRole0Status();
+    roleStatuses = Arrays.asList(roleStatus);
+  }
+
+  @Test
+  public void testUpdateBlacklist() {
+    assertFalse(ni.isBlacklisted());
+
+    // at threshold, blacklist is unmodified
+    recordAsFailed(ni, roleStatus.getKey(), MockFactory.NODE_FAILURE_THRESHOLD);
+    UpdateBlacklistOperation op = roleHistory.updateBlacklist(roleStatuses);
+    // a null operation means "no change to publish"
+    assertNull(op);
+    assertFalse(ni.isBlacklisted());
+
+    // threshold is reached, node goes on blacklist
+    recordAsFailed(ni, roleStatus.getKey(), 1);
+    op = roleHistory.updateBlacklist(roleStatuses);
+    assertNotNull(op);
+    assertTrue(ni.isBlacklisted());
+
+    // blacklist remains unmodified
+    op = roleHistory.updateBlacklist(roleStatuses);
+    assertNull(op);
+    assertTrue(ni.isBlacklisted());
+
+    // failure threshold reset, node goes off blacklist
+    ni.resetFailedRecently();
+    op = roleHistory.updateBlacklist(roleStatuses);
+    assertNotNull(op);
+    assertFalse(ni.isBlacklisted());
+  }
+
+  // end-to-end: review cycle emits the blacklist op, the handler applies
+  // it, and a ResetFailureWindow action clears the blacklist again
+  @Test
+  public void testBlacklistOperations()
+      throws Exception {
+    recordAsFailed(ni, roleStatus.getKey(), MockFactory
+        .NODE_FAILURE_THRESHOLD + 1);
+
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    assertListLength(ops, 1);
+    AbstractRMOperation op = ops.get(0);
+    assertTrue(op instanceof UpdateBlacklistOperation);
+    assertTrue(ni.isBlacklisted());
+
+    MockRMOperationHandler handler = new MockRMOperationHandler();
+    assertEquals(0, handler.getBlacklisted());
+    handler.execute(ops);
+    assertEquals(1, handler.getBlacklisted());
+
+    ResetFailureWindow resetter = new ResetFailureWindow(handler);
+    resetter.execute(new MockAM(), null, appState);
+    assertEquals(0, handler.getBlacklisted());
+    assertFalse(ni.isBlacklisted());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java
new file mode 100644
index 0000000..419f2fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.common.tools.SliderUtils;
+
+/**
+ * Provides allocation services to a cluster -both random and placed.
+ *
+ * Important: container allocations need an app attempt ID put into the
+ * container ID
+ */
+public class Allocator {
+
+  private final MockYarnCluster cluster;
+  /**
+   * Rolling index into the cluster used for the next "random" assignment.
+   */
+  private int rollingIndex = 0;
+
+  Allocator(MockYarnCluster cluster) {
+    this.cluster = cluster;
+  }
+
+  /**
+   * Allocate a node using the list of nodes in the container request as the
+   * hints. If no hinted node has capacity, falls back to a random
+   * allocation when locality is relaxed or no hints were supplied.
+   * @param request request
+   * @return the allocated container -or null for none
+   */
+  MockContainer allocate(AMRMClient.ContainerRequest request) {
+    MockYarnCluster.MockYarnClusterNode node = null;
+    MockYarnCluster.MockYarnClusterContainer allocated = null;
+    if (SliderUtils.isNotEmpty(request.getNodes())) {
+      for (String host : request.getNodes()) {
+        node = cluster.lookup(host);
+        if (node == null) {
+          // host not in the mock cluster: skip rather than NPE
+          continue;
+        }
+        allocated = node.allocate();
+        if (allocated != null) {
+          break;
+        }
+      }
+    }
+
+    if (allocated != null) {
+      return createContainerRecord(request, allocated, node);
+    } else {
+      // null-safe: a request with no host list counts as unplaced
+      if (request.getRelaxLocality()
+          || request.getNodes() == null || request.getNodes().isEmpty()) {
+        // fallback to anywhere
+        return allocateRandom(request);
+      } else {
+        //no match and locality can't be requested
+        return null;
+      }
+    }
+  }
+
+  /**
+   * Allocate a node without any positioning -use whatever policy this
+   * allocator chooses.
+   * @param request request
+   * @return the allocated container -or null for none (cluster full)
+   */
+  MockContainer allocateRandom(AMRMClient.ContainerRequest request) {
+    int start = rollingIndex;
+    MockYarnCluster.MockYarnClusterNode node = cluster.nodeAt(rollingIndex);
+    MockYarnCluster.MockYarnClusterContainer allocated = node.allocate();
+    // if there is no space, try again -but stop when all the nodes
+    // have failed (the rolling index has wrapped back to the start)
+    while (allocated == null && start != nextIndex()) {
+      node = cluster.nodeAt(rollingIndex);
+      allocated = node.allocate();
+    }
+
+    // here the allocation is set (or null when the whole cluster is full;
+    // createContainerRecord() maps that to a null return)
+    return createContainerRecord(request, allocated, node);
+  }
+
+  /**
+   * Create a container record -if one was allocated.
+   * @param request the originating request (priority/capability are copied)
+   * @param allocated allocation -may be null
+   * @param node node with the container
+   * @return a container record, or null if there was no allocation
+   */
+  public MockContainer createContainerRecord(
+      AMRMClient.ContainerRequest request,
+      MockYarnCluster.MockYarnClusterContainer allocated,
+      MockYarnCluster.MockYarnClusterNode node) {
+    if (allocated == null) {
+      // no space
+      return null;
+    }
+    MockContainer container = new MockContainer();
+    container.setId(new MockContainerId(allocated.getCid()));
+    container.setNodeId(node.getNodeId());
+    container.setNodeHttpAddress(node.httpAddress());
+    container.setPriority(request.getPriority());
+    container.setResource(request.getCapability());
+    return container;
+  }
+
+  /**
+   * Advance the rolling index by one, wrapping at the cluster size.
+   * @return the new index
+   */
+  public int nextIndex() {
+    rollingIndex = (rollingIndex + 1) % cluster.getClusterSize();
+    return rollingIndex;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
new file mode 100644
index 0000000..eca8401
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java
@@ -0,0 +1,524 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.core.main.LauncherExitCodes;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.ProviderAppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.utils.SliderTestBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+
+/**
+ * Base for app state tests: wires up a mock YARN engine and application,
+ * and provides helpers for allocating, starting, releasing and asserting
+ * on containers and role instances.
+ */
+public abstract class BaseMockAppStateTest extends SliderTestBase implements
+    MockRoles {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaseMockAppStateTest.class);
+  protected static final List<ContainerId> EMPTY_ID_LIST = Collections
+      .emptyList();
+
+  protected final MockFactory factory = MockFactory.INSTANCE;
+  protected MockAppState appState;
+  protected MockYarnEngine engine;
+  protected FileSystem fs;
+  protected SliderFileSystem sliderFileSystem;
+  protected File historyWorkDir;
+  protected Path historyPath;
+  protected MockApplicationId applicationId;
+  protected MockApplicationAttemptId applicationAttemptId;
+  protected StateAccessForProviders stateAccess;
+
+  /**
+   * Override point: called in setup() to create the YARN engine; can
+   * be changed for different sizes and options.
+   * @return the engine for this test run (default: 8 nodes of 8 containers)
+   */
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(8, 8);
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    // a local filesystem keeps history persistence self-contained
+    fs = FileSystem.get(new URI("file:///"), conf);
+    sliderFileSystem = new SliderFileSystem(fs, conf);
+    engine = createYarnEngine();
+    initApp();
+  }
+
+  /**
+   * Initialize the application.
+   * This uses the binding information supplied by {@link #buildBindingInfo()}.
+   */
+  protected void initApp()
+      throws IOException, BadConfigException, BadClusterStateException {
+    String historyDirName = getTestName();
+    applicationId = new MockApplicationId(1, 0);
+    applicationAttemptId = new MockApplicationAttemptId(applicationId, 1);
+
+    // each test gets its own history dir, wiped before use
+    historyWorkDir = new File("target/history", historyDirName);
+    historyPath = new Path(historyWorkDir.toURI());
+    fs.delete(historyPath, true);
+    appState = new MockAppState(buildBindingInfo());
+    stateAccess = new ProviderAppState(getTestName(), appState);
+  }
+
+  /**
+   * Build the binding info from the default constructor values,
+   * the roles from {@link #factory}, and an instance definition
+   * from {@link #buildApplication()}.
+   * @return the binding info used to construct the app state
+   */
+  protected AppStateBindingInfo buildBindingInfo() {
+    AppStateBindingInfo binding = new AppStateBindingInfo();
+    binding.application = buildApplication();
+    //binding.roles = new ArrayList<>(factory.ROLES);
+    binding.fs = fs;
+    binding.historyPath = historyPath;
+    binding.nodeReports = engine.getNodeReports();
+    return binding;
+  }
+
+  /**
+   * Override point, define the instance definition.
+   * @return the instance definition
+   */
+  public Application buildApplication() {
+    return factory.newApplication(0, 0, 0).name(getTestName());
+  }
+
+  /**
+   * Get the test name ... defaults to method name
+   * @return the method name
+   */
+  public String getTestName() {
+    return methodName.getMethodName();
+  }
+
+  public RoleStatus getRole0Status() {
+    return lookupRole(ROLE0);
+  }
+
+  public RoleStatus lookupRole(String role) {
+    return appState.lookupRoleStatus(role);
+  }
+
+  public RoleStatus getRole1Status() {
+    return lookupRole(ROLE1);
+  }
+
+  public RoleStatus getRole2Status() {
+    return lookupRole(ROLE2);
+  }
+
+  /**
+   * Build a role instance from a container assignment.
+   * @param assigned the assignment to convert
+   * @return the instance
+   */
+  public RoleInstance roleInstance(ContainerAssignment assigned) {
+    Container target = assigned.container;
+    RoleInstance ri = new RoleInstance(target);
+    ri.roleId = assigned.role.getPriority();
+    ri.role = assigned.role.getName();
+    return ri;
+  }
+
+  /**
+   * Create a node instance with the given age for role 0 and live
+   * container counts for roles 0..2; zero counts for roles 1/2 are skipped.
+   */
+  public NodeInstance nodeInstance(long age, int live0, int live1, int live2) {
+    NodeInstance ni = new NodeInstance(String.format("age%d-[%d,%d,%d]", age,
+        live0, live1, live2), MockFactory.ROLE_COUNT);
+    ni.getOrCreate(getRole0Status().getKey()).setLastUsed(age);
+    ni.getOrCreate(getRole0Status().getKey()).setLive(live0);
+    if (live1 > 0) {
+      ni.getOrCreate(getRole1Status().getKey()).setLive(live1);
+    }
+    if (live2 > 0) {
+      ni.getOrCreate(getRole2Status().getKey()).setLive(live2);
+    }
+    return ni;
+  }
+
+  /**
+   * Create a container status event.
+   * @param c container
+   * @return a status
+   */
+  ContainerStatus containerStatus(Container c) {
+    return containerStatus(c.getId());
+  }
+
+  /**
+   * Create a container status instance for the given ID, declaring
+   * that it was shut down by the application itself.
+   * @param cid container Id
+   * @return the instance
+   */
+  public ContainerStatus containerStatus(ContainerId cid) {
+    ContainerStatus status = containerStatus(cid,
+        LauncherExitCodes.EXIT_CLIENT_INITIATED_SHUTDOWN);
+    return status;
+  }
+
+  public ContainerStatus containerStatus(ContainerId cid, int exitCode) {
+    ContainerStatus status = ContainerStatus.newInstance(
+        cid,
+        ContainerState.COMPLETE,
+        "",
+        exitCode);
+    return status;
+  }
+
+  /**
+   * Create nodes and bring them to the started state.
+   * @return a list of roles
+   */
+  protected List<RoleInstance> createAndStartNodes()
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    return createStartAndStopNodes(new ArrayList<>());
+  }
+
+  /**
+   * Create, Start and stop nodes.
+   * @param completionResults List filled in with the status on all completed
+   *                          nodes
+   * @return the nodes
+   */
+  public List<RoleInstance> createStartAndStopNodes(
+      List<AppState.NodeCompletionResult> completionResults)
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    List<ContainerId> released = new ArrayList<>();
+    List<RoleInstance> instances = createAndSubmitNodes(released);
+    processSubmissionOperations(instances, completionResults, released);
+    return instances;
+  }
+
+  /**
+   * Process the start/stop operations.
+   * @param instances role instances to mark as started
+   * @param completionResults filled in with the results of each release
+   * @param released container IDs to release
+   */
+  public void processSubmissionOperations(
+      List<RoleInstance> instances,
+      List<AppState.NodeCompletionResult> completionResults,
+      List<ContainerId> released) {
+    for (RoleInstance instance : instances) {
+      LOG.debug("Started {} on {}", instance.role, instance.id)
+;
+      assertNotNull(appState.onNodeManagerContainerStarted(instance
+          .getContainerId()));
+    }
+    releaseContainers(completionResults,
+        released,
+        ContainerState.COMPLETE,
+        "released",
+        0
+    );
+  }
+
+  /**
+   * Release a list of containers, updating the completion results.
+   * @param completionResults filled in with the outcome of each release
+   * @param containerIds containers to release
+   * @param containerState state to report for each container
+   * @param exitText diagnostic text for the container status
+   * @param containerExitCode exit code to report
+   */
+  public void releaseContainers(
+      List<AppState.NodeCompletionResult> completionResults,
+      List<ContainerId> containerIds,
+      ContainerState containerState,
+      String exitText,
+      int containerExitCode) {
+    for (ContainerId id : containerIds) {
+      ContainerStatus status = ContainerStatus.newInstance(id,
+          containerState,
+          exitText,
+          containerExitCode);
+      completionResults.add(appState.onCompletedContainer(status));
+    }
+  }
+
+  /**
+   * Create nodes and submit them.
+   * @return a list of roles
+   */
+  public List<RoleInstance> createAndSubmitNodes()
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    return createAndSubmitNodes(new ArrayList<>());
+  }
+
+  /**
+   * Create nodes and submit them.
+   * @return a list of roles
+   */
+  public List<RoleInstance> createAndSubmitNodes(List<ContainerId> containerIds)
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    return createAndSubmitNodes(containerIds, new ArrayList<>());
+  }
+
+  /**
+   * Create nodes and submit them.
+   * @return a list of roles allocated
+   */
+  public List<RoleInstance> createAndSubmitNodes(
+      List<ContainerId> containerIds,
+      List<AbstractRMOperation> operationsOut)
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+    return submitOperations(ops, containerIds, operationsOut);
+  }
+
+  public List<RoleInstance> submitOperations(
+      List<AbstractRMOperation> operationsIn,
+      List<ContainerId> released) {
+    return submitOperations(operationsIn, released, new ArrayList<>());
+  }
+
+  /**
+   * Process the RM operations and send <code>onContainersAllocated</code>
+   * events to the app state.
+   * @param operationsIn list of incoming ops
+   * @param released released containers
+   * @return list of outbound operations
+   */
+  public List<RoleInstance> submitOperations(
+      List<AbstractRMOperation> operationsIn,
+      List<ContainerId> released,
+      List<AbstractRMOperation> operationsOut) {
+    List<Container> allocatedContainers = engine.execute(operationsIn,
+        released);
+    List<ContainerAssignment> assignments = new ArrayList<>();
+    appState.onContainersAllocated(allocatedContainers, assignments,
+        operationsOut);
+
+    List<RoleInstance> roles = new ArrayList<>();
+    for (ContainerAssignment assigned : assignments) {
+      Container container = assigned.container;
+      RoleInstance ri = roleInstance(assigned);
+      //tell the app it arrived
+      // (fixed SLF4J placeholder: "${}" is a Groovy-ism and would log
+      // the literal text rather than the container ID)
+      LOG.debug("Start submitted {} on {}", ri.role, container.getId());
+      appState.containerStartSubmitted(container, ri);
+      roles.add(ri);
+    }
+    return roles;
+  }
+
+  /**
+   * Add the AM to the app state.
+   */
+  protected void addAppMastertoAppState() {
+//    appState.buildAppMasterNode(
+//        new MockContainerId(applicationAttemptId, 999999L),
+//        "appmaster",
+//        0,
+//        null);
+  }
+
+  /**
+   * Extract the list of container IDs from the list of role instances.
+   * @param instances instance list
+   * @param role role to look up
+   * @return the list of CIDs
+   */
+  public List<ContainerId> extractContainerIds(
+      List<RoleInstance> instances,
+      String role) {
+    List<ContainerId> ids = new ArrayList<>();
+    for (RoleInstance ri : instances) {
+      if (ri.role.equals(role)) {
+        ids.add(ri.getContainerId());
+      }
+    }
+    return ids;
+  }
+
+  /**
+   * Record a node as failing.
+   * @param node node to mark
+   * @param id role key
+   * @param count number of failures to record
+   * @return the entry
+   */
+  public NodeEntry recordAsFailed(NodeInstance node, int id, int count) {
+    NodeEntry entry = node.getOrCreate(id);
+    for (int i = 1; i <= count; i++) {
+      entry.containerCompleted(
+          false,
+          ContainerOutcome.Failed);
+    }
+    return entry;
+  }
+
+  protected void recordAllFailed(int id, int count, List<NodeInstance> nodes) {
+    for (NodeInstance node : nodes) {
+      recordAsFailed(node, id, count);
+    }
+  }
+
+  /**
+   * Get the container request of an indexed entry. Includes some assertions
+   * for better diagnostics
+   * @param ops operation list
+   * @param index index in the list
+   * @return the request.
+   */
+  public AMRMClient.ContainerRequest getRequest(List<AbstractRMOperation> ops,
+      int index) {
+    assertTrue(index < ops.size());
+    AbstractRMOperation op = ops.get(index);
+    assertTrue(op instanceof ContainerRequestOperation);
+    return ((ContainerRequestOperation) op).getRequest();
+  }
+
+  /**
+   * Get the cancel request of an indexed entry. Includes some assertions for
+   * better diagnostics
+   * @param ops operation list
+   * @param index index in the list
+   * @return the request.
+   */
+  public AMRMClient.ContainerRequest getCancel(List<AbstractRMOperation> ops,
+      int index) {
+    assertTrue(index < ops.size());
+    AbstractRMOperation op = ops.get(index);
+    assertTrue(op instanceof CancelSingleRequest);
+    return ((CancelSingleRequest) op).getRequest();
+  }
+
+  /**
+   * Get the single request of a list of operations; includes the check for
+   * the size.
+   * @param ops operations list of size 1
+   * @return the request within the first ContainerRequestOperation
+   */
+  public AMRMClient.ContainerRequest getSingleRequest(
+      List<AbstractRMOperation> ops) {
+    assertEquals(1, ops.size());
+    return getRequest(ops, 0);
+  }
+
+  /**
+   * Get the single request of a list of operations; includes the check for
+   * the size.
+   * @param ops operations list of size 1
+   * @return the request within the first operation
+   */
+  public AMRMClient.ContainerRequest getSingleCancel(
+      List<AbstractRMOperation> ops) {
+    assertEquals(1, ops.size());
+    return getCancel(ops, 0);
+  }
+
+  /**
+   * Get the single release of a list of operations; includes the check for
+   * the size.
+   * @param ops operations list of size 1
+   * @return the request within the first operation
+   */
+  public ContainerReleaseOperation getSingleRelease(
+      List<AbstractRMOperation> ops) {
+    assertEquals(1, ops.size());
+    AbstractRMOperation op = ops.get(0);
+    assertTrue(op instanceof ContainerReleaseOperation);
+    return (ContainerReleaseOperation) op;
+  }
+
+  /**
+   * Get the node information as a large JSON String.
+   * @return the pretty-printed node information snapshot
+   */
+  protected String nodeInformationSnapshotAsString()
+      throws UnsupportedEncodingException, JsonProcessingException {
+    return prettyPrintAsJson(stateAccess.getNodeInformationSnapshot());
+  }
+
+  /**
+   * Scan through all containers and assert that the assignment is AA.
+   * @param index role index
+   */
+  protected void assertAllContainersAA(int index) {
+    for (Entry<String, NodeInstance> nodeMapEntry : cloneNodemap().entrySet()) {
+      String name = nodeMapEntry.getKey();
+      NodeInstance ni = nodeMapEntry.getValue();
+      NodeEntry nodeEntry = ni.get(index);
+      assertTrue("too many instances on node " + name, nodeEntry == null ||
+          nodeEntry.isAntiAffinityConstraintHeld());
+    }
+  }
+
+  /**
+   * Get a snapshot of the nodemap of the application state.
+   * @return a cloned nodemap
+   */
+  protected NodeMap cloneNodemap() {
+    return appState.getRoleHistory().cloneNodemap();
+  }
+
+  /**
+   * Issue a nodes updated event.
+   * @param report report to notify
+   * @return response of AM
+   */
+  protected AppState.NodeUpdatedOutcome updateNodes(NodeReport report) {
+    return appState.onNodesUpdated(Collections.singletonList(report));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java
new file mode 100644
index 0000000..66ae0f9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.slider.server.appmaster.SliderAppMaster;
+
+/**
+ * Mock AM: a bare subclass of {@link SliderAppMaster} used where tests
+ * need an AM instance but none of its behavior (e.g. as an argument to
+ * async actions such as ResetFailureWindow).
+ */
+public class MockAM extends SliderAppMaster {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java
new file mode 100644
index 0000000..2fcf054
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
+import org.apache.slider.server.appmaster.state.AbstractClusterServices;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Extended app state that makes more things public.
+ * Also lets tests freeze or advance the clock consulted by {@code now()}.
+ */
+public class MockAppState extends AppState {
+  public static final int RM_MAX_RAM = 4096;
+  public static final int RM_MAX_CORES = 64;
+
+  /** Frozen test clock in millis; a non-positive value means "use the wall clock". */
+  private long clock = -1;
+
+  /**
+   * Create an instance bound to the given cluster services, applying the
+   * standard mock RM container limits.
+   * @param recordFactory cluster services used to build records
+   */
+  public MockAppState(AbstractClusterServices recordFactory) {
+    super(recordFactory, new MetricsAndMonitoring());
+    setContainerLimits(1, RM_MAX_RAM, 1, RM_MAX_CORES);
+  }
+
+  /**
+   * Instance with a mock record factory.
+   */
+  public MockAppState() {
+    this(new MockClusterServices());
+  }
+
+  /**
+   * Instance with a mock record factory, built from the binding info.
+   * @param bindingInfo binding information to apply
+   */
+  public MockAppState(AppStateBindingInfo bindingInfo)
+      throws BadClusterStateException, IOException, BadConfigException {
+    this();
+    buildInstance(bindingInfo);
+  }
+
+  /** Widen access to the role map for tests. */
+  public Map<String, ProviderRole> getRoleMap() {
+    return super.getRoleMap();
+  }
+
+  /**
+   * Current time. If the test clock has been set, that value is returned.
+   * @return the current time.
+   */
+  protected long now() {
+    return clock > 0 ? clock : System.currentTimeMillis();
+  }
+
+  /** Freeze the clock at the given time. */
+  public void setTime(long newTime) {
+    clock = newTime;
+  }
+
+  /** Advance the clock by the given increment. */
+  public void incTime(long inc) {
+    clock += inc;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java
new file mode 100644
index 0000000..b509625
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.mock;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Simple bean-style mock of the abstract YARN ApplicationAttemptId, backed
+ * by plain fields instead of protobuf records.
+ */
+class MockApplicationAttemptId extends ApplicationAttemptId {
+
+  private ApplicationId applicationId;
+  private int attemptId;
+
+  public MockApplicationAttemptId() {
+  }
+
+  public MockApplicationAttemptId(ApplicationId applicationId, int attemptId) {
+    this.applicationId = applicationId;
+    this.attemptId = attemptId;
+  }
+
+  @Override
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  @Override
+  public void setApplicationId(ApplicationId applicationId) {
+    this.applicationId = applicationId;
+  }
+
+  @Override
+  public int getAttemptId() {
+    return attemptId;
+  }
+
+  @Override
+  public void setAttemptId(int attemptId) {
+    this.attemptId = attemptId;
+  }
+
+  // No protobuf backing record, so there is nothing to build.
+  @Override
+  protected void build() {
+
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[30/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerOutcome.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerOutcome.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerOutcome.java
index 59ab30b..6df4bf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerOutcome.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerOutcome.java
@@ -29,7 +29,7 @@ public enum ContainerOutcome {
   Completed,
   Failed,
   Failed_limits_exceeded,
-  Node_failure,
+  Disk_failure,
   Preempted;
 
   /**
@@ -48,7 +48,7 @@ public enum ContainerOutcome {
         // could either be a release or node failure. Treat as completion
         return Completed;
       case ContainerExitStatus.DISKS_FAILED:
-        return Node_failure;
+        return Disk_failure;
       case ContainerExitStatus.PREEMPTED:
         return Preempted;
       case ContainerExitStatus.KILLED_EXCEEDED_PMEM:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/NodeEntry.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/NodeEntry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/NodeEntry.java
index eb8ff03..d57b6d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/NodeEntry.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/NodeEntry.java
@@ -222,7 +222,7 @@ public class NodeEntry implements Cloneable {
         // general "any reason" app failure
         case Failed:
         // specific node failure
-        case Node_failure:
+        case Disk_failure:
 
           ++failed;
           ++failedRecently;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
index 8e8546b..5051aee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleStatus.java
@@ -21,7 +21,6 @@ package org.apache.slider.server.appmaster.state;
 import com.codahale.metrics.Metric;
 import com.codahale.metrics.MetricSet;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.RoleStatistics;
@@ -35,8 +34,6 @@ import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 
-import static org.apache.hadoop.metrics2.lib.Interns.info;
-
 /**
  * Models the ongoing status of all nodes in an application.
  *
@@ -207,12 +204,16 @@ public final class RoleStatus implements MetricSet {
     return componentMetrics.containersDesired.value();
   }
 
-  long getRunning() {
+  public void setDesired(int desired) {
+    componentMetrics.containersDesired.set(desired);
+  }
+
+  public long getRunning() {
     return componentMetrics.containersRunning.value();
   }
 
-  public long getPending() {
-    return componentMetrics.containersPending.value();
+  public long getRequested() {
+    return componentMetrics.containersRequested.value();
   }
 
   public long getAAPending() {
@@ -222,22 +223,35 @@ public final class RoleStatus implements MetricSet {
   void decAAPending() {
     componentMetrics.pendingAAContainers.decr();
   }
+
   void setAAPending(long n) {
     componentMetrics.pendingAAContainers.set((int)n);
   }
 
-  long getFailedRecently() {
+  public long getLimitsExceeded() {
+    return componentMetrics.containersLimitsExceeded.value();
+  }
+
+  public long getPreempted() {
+    return componentMetrics.containersPreempted.value();
+  }
+
+  public long getDiskFailed() {
+    return componentMetrics.containersDiskFailure.value();
+  }
+
+  public long getFailedRecently() {
     return componentMetrics.failedSinceLastThreshold.value();
   }
 
-  long resetFailedRecently() {
+  public long resetFailedRecently() {
     long count =
         componentMetrics.failedSinceLastThreshold.value();
     componentMetrics.failedSinceLastThreshold.set(0);
     return count;
   }
 
-  long getFailed() {
+  public long getFailed() {
     return componentMetrics.containersFailed.value();
   }
 
@@ -254,6 +268,8 @@ public final class RoleStatus implements MetricSet {
     long inuse = getActualAndRequested();
     long delta = getDesired() - inuse;
     if (delta < 0) {
+      // TODO this doesn't do anything now that we're not tracking releasing
+      // containers -- maybe we need releasing
       //if we are releasing, remove the number that are already released.
       //but never switch to a positive
       delta = Math.min(delta, 0);
@@ -262,11 +278,11 @@ public final class RoleStatus implements MetricSet {
   }
 
   /**
-   * Get count of actual and requested containers. This includes pending ones
+   * Get count of actual and requested containers.
    * @return the size of the application when outstanding requests are included.
    */
   public long getActualAndRequested() {
-    return getRunning() + getPending();
+    return getRunning() + getRequested();
   }
 
   /**
@@ -341,6 +357,14 @@ public final class RoleStatus implements MetricSet {
   public synchronized RoleStatistics getStatistics() {
     RoleStatistics stats = new RoleStatistics();
     stats.activeAA = getOutstandingAARequestCount();
+    stats.actual = getRunning();
+    stats.desired = getDesired();
+    stats.failed = getFailed();
+    stats.limitsExceeded = getLimitsExceeded();
+    stats.nodeFailed = getDiskFailed();
+    stats.preempted = getPreempted();
+    stats.requested = getRequested();
+    stats.started = getRunning();
     return stats;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
index 440094e..8dca4ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java
@@ -156,7 +156,7 @@ public class IndexBlock extends SliderHamletBlock {
         } else {
           aatext = "";
         }
-        if (status.getPending() > 0) {
+        if (status.getRequested() > 0) {
           roleWithOpenRequest ++;
         }
       }
@@ -165,7 +165,7 @@ public class IndexBlock extends SliderHamletBlock {
         .td().a(nameUrl, roleName)._()
         .td(String.format("%d", metrics.containersDesired.value()))
         .td(String.format("%d", metrics.containersRunning.value()))
-        .td(String.format("%d", metrics.containersPending.value()))
+        .td(String.format("%d", metrics.containersRequested.value()))
         .td(String.format("%d", metrics.containersFailed.value()))
         .td(aatext)
         ._();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
index bfcab23..691f861 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
@@ -80,12 +80,16 @@ message UpgradeContainersRequestProto {
 message UpgradeContainersResponseProto {
 }
 
-message FlexComponentRequestProto {
+message FlexComponentsRequestProto {
+  repeated ComponentCountProto components = 1;
+}
+
+message ComponentCountProto {
   optional string name = 1;
-  optional int32 numberOfContainers = 2;
+  optional int64 numberOfContainers = 2;
 }
 
-message FlexComponentResponseProto {
+message FlexComponentsResponseProto {
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
index f52d7a1..776ce28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
@@ -61,7 +61,7 @@ service SliderClusterProtocolPB {
   rpc upgradeContainers(UpgradeContainersRequestProto) 
     returns(UpgradeContainersResponseProto);
 
-  rpc flexComponent(FlexComponentRequestProto) returns (FlexComponentResponseProto);
+  rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto);
 
   /**
    * Get the current cluster status

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java
new file mode 100644
index 0000000..28483dc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.server.appmaster.rpc.RpcBinder;
+import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPB;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests RPC work.
+ */
+public class TestRPCBinding {
+
+  /** Verify the Slider protobuf API can be registered and bound. */
+  @Test
+  public void testRegistration() throws Throwable {
+    Configuration conf = new Configuration();
+    RpcBinder.registerSliderAPI(conf);
+    assertTrue(RpcBinder.verifyBondedToProtobuf(conf,
+        SliderClusterProtocolPB.class));
+  }
+
+  /** Verify a client-side proxy can be constructed for a (dummy) address. */
+  @Test
+  public void testGetProxy() throws Throwable {
+    Configuration conf = new Configuration();
+    InetSocketAddress saddr = new InetSocketAddress("127.0.0.1", 9000);
+    SliderClusterProtocol proxy =
+        RpcBinder.connectToServer(saddr, null, conf, 1000);
+    // Previously the proxy was created and silently discarded, so the test
+    // could never fail on proxy construction; assert it was actually built.
+    org.junit.Assert.assertNotNull("failed to create a proxy", proxy);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java
new file mode 100644
index 0000000..6299a9c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.UsageException;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+
+/**
+ * Test the argument parsing/validation logic.
+ * Each case launches the client via SliderTestBase#launchExpectingException
+ * and checks that the expected error text (and, where asserted, exception
+ * type) is produced for a malformed command line.
+ */
+public class TestClientBadArgs extends SliderTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestClientBadArgs.class);
+
+  /** No action at all: the top-level usage string must be printed. */
+  @Test
+  public void testNoAction() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             "Usage: slider COMMAND",
+                             EMPTY_LIST);
+
+  }
+
+  /** An unrecognized action must be echoed back in the error. */
+  @Test
+  public void testUnknownAction() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             "not-a-known-action",
+                             Arrays.asList("not-a-known-action"));
+  }
+
+  /** "build" with no application name: action-specific usage is shown. */
+  @Test
+  public void testActionWithoutOptions() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             "Usage: slider build <application>",
+                             Arrays.asList(SliderActions.ACTION_BUILD));
+  }
+
+  /** "start" with too few arguments fails with the standard error string. */
+  @Test
+  public void testActionWithoutEnoughArgs() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             ErrorStrings.ERROR_NOT_ENOUGH_ARGUMENTS,
+                             Arrays.asList(SliderActions.ACTION_START));
+  }
+
+  /** "help" with a stray extra argument fails with too-many-arguments. */
+  @Test
+  public void testActionWithTooManyArgs() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             ErrorStrings.ERROR_TOO_MANY_ARGUMENTS,
+                             Arrays.asList(SliderActions.ACTION_HELP,
+                             "hello, world"));
+  }
+
+  /** --image is not valid for the help action. */
+  @Test
+  public void testBadImageArg() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                             createTestConfig(),
+                             "Unknown option: --image",
+                            Arrays.asList(SliderActions.ACTION_HELP,
+                             Arguments.ARG_IMAGE));
+  }
+
+  /** "registry" without --name raises a UsageException. */
+  @Test
+  public void testRegistryUsage() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "org.apache.slider.core.exceptions.UsageException: Argument --name " +
+            "missing",
+        Arrays.asList(SliderActions.ACTION_REGISTRY));
+    assertTrue(exception instanceof UsageException);
+    LOG.info(exception.toString());
+  }
+
+  /** --getexp with no value: bad-arguments failure. */
+  @Test
+  public void testRegistryExportBadUsage1() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "Expected a value after parameter --getexp",
+        Arrays.asList(SliderActions.ACTION_REGISTRY,
+            Arguments.ARG_NAME,
+            "cl1",
+            Arguments.ARG_GETEXP));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+    LOG.info(exception.toString());
+  }
+
+  /** --getexp still needs a value even when --listexp is also present. */
+  @Test
+  public void testRegistryExportBadUsage2() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "Expected a value after parameter --getexp",
+        Arrays.asList(SliderActions.ACTION_REGISTRY,
+            Arguments.ARG_NAME,
+            "cl1",
+            Arguments.ARG_LISTEXP,
+        Arguments.ARG_GETEXP));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+    LOG.info(exception.toString());
+  }
+
+  /** --listexp combined with a valued --getexp is a usage error. */
+  @Test
+  public void testRegistryExportBadUsage3() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "Usage: registry",
+        Arrays.asList(SliderActions.ACTION_REGISTRY,
+            Arguments.ARG_NAME,
+            "cl1",
+            Arguments.ARG_LISTEXP,
+            Arguments.ARG_GETEXP,
+            "export1"));
+    assertTrue(exception instanceof UsageException);
+    LOG.info(exception.toString());
+  }
+
+  /** "upgrade" with no application name: not-enough-arguments failure. */
+  @Test
+  public void testUpgradeUsage() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "org.apache.slider.core.exceptions.BadCommandArgumentsException: Not " +
+            "enough arguments for action: upgrade Expected minimum 1 but got 0",
+        Arrays.asList(SliderActions.ACTION_UPGRADE));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+    LOG.info(exception.toString());
+  }
+
+  /**
+   * Build a configuration pointing the RM address at a local dummy endpoint
+   * so client launches fail fast on argument parsing, not on RM lookup.
+   */
+  public Configuration createTestConfig() {
+    Configuration configuration = new Configuration();
+    configuration.set(YarnConfiguration.RM_ADDRESS,  "127.0.0.1:8032");
+    return configuration;
+  }
+
+  // Ignored until the upgrade argument validation is implemented (see TODO).
+  @Ignore
+  @Test
+  public void testUpgradeWithTemplateResourcesAndContainersOption() throws
+      Throwable {
+    //TODO test upgrade args
+    String appName = "test_hbase";
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "BadCommandArgumentsException: Option --containers cannot be "
+        + "specified with --appdef",
+        Arrays.asList(SliderActions.ACTION_UPGRADE,
+            appName,
+            Arguments.ARG_APPDEF,
+            "/tmp/app.json",
+            Arguments.ARG_CONTAINERS,
+            "container_1"
+        ));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+    LOG.info(exception.toString());
+  }
+
+  // Ignored until the upgrade argument validation is implemented (see TODO).
+  @Ignore
+  @Test
+  public void testUpgradeWithTemplateResourcesAndComponentsOption() throws
+      Throwable {
+    //TODO test upgrade args
+    String appName = "test_hbase";
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "BadCommandArgumentsException: Option --components cannot be "
+        + "specified with --appdef",
+        Arrays.asList(SliderActions.ACTION_UPGRADE,
+            appName,
+            Arguments.ARG_APPDEF,
+            "/tmp/app.json",
+            Arguments.ARG_COMPONENTS,
+            "HBASE_MASTER"
+        ));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+    LOG.info(exception.toString());
+  }
+
+  /** "nodes" with --out but no file value: bad-arguments failure. */
+  @Test
+  public void testNodesMissingFile() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        createTestConfig(),
+        "after parameter --out",
+        Arrays.asList(SliderActions.ACTION_NODES, Arguments.ARG_OUTPUT));
+    assertTrue(exception instanceof BadCommandArgumentsException);
+  }
+
+  /** "flex" with no component counts: flex usage error. */
+  @Test
+  public void testFlexWithNoComponents() throws Throwable {
+    Throwable exception = launchExpectingException(SliderClient.class,
+        new Configuration(),
+        "Usage: slider flex <application>",
+        Arrays.asList(
+            SliderActions.ACTION_FLEX,
+            "flex1",
+            Arguments.ARG_DEFINE,
+            YarnConfiguration.RM_ADDRESS + "=127.0.0.1:8032"
+        ));
+    assertTrue(exception instanceof UsageException);
+    LOG.info(exception.toString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java
new file mode 100644
index 0000000..43c5163
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+
+/**
+ * Test bad argument handling.
+ */
+public class TestClientBasicArgs extends SliderTestBase {
+
+  /**
+   * Help should print out help string and then succeed.
+   * @throws Throwable
+   */
+  @Test
+  public void testHelp() throws Throwable {
+    ServiceLauncher launcher = launch(SliderClient.class,
+                                      SliderUtils.createConfiguration(),
+                                      Arrays.asList(ClientArgs.ACTION_HELP));
+    assertEquals(0, launcher.getServiceExitCode());
+  }
+
+  @Test
+  public void testNoArgs() throws Throwable {
+    launchExpectingException(SliderClient.class,
+                                        SliderUtils.createConfiguration(),
+                                        "Usage: slider COMMAND",
+                                        EMPTY_LIST);
+  }
+
+  @Test
+  public void testListUnknownRM() throws Throwable {
+    try {
+      YarnConfiguration conf = SliderUtils.createConfiguration();
+      conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
+          1000);
+      conf.setLong(YarnConfiguration
+          .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000);
+      ServiceLauncher launcher = launch(SliderClient.class,
+                                        conf,
+                                        Arrays.asList(
+                                        ClientArgs.ACTION_LIST,
+                                        "cluster",
+                                        Arguments.ARG_MANAGER,
+                                        "badhost:8888"));
+      fail("expected an exception, got a launcher with exit code " +
+          launcher.getServiceExitCode());
+    } catch (UnknownHostException expected) {
+      //expected
+    }
+
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java
new file mode 100644
index 0000000..ec6dbb8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java
@@ -0,0 +1,522 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.RoleKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
+import org.apache.slider.common.params.ActionBuildArgs;
+import org.apache.slider.common.params.ActionCreateArgs;
+import org.apache.slider.common.params.ActionDestroyArgs;
+import org.apache.slider.common.params.ActionExistsArgs;
+import org.apache.slider.common.params.ActionFlexArgs;
+import org.apache.slider.common.params.ActionFreezeArgs;
+import org.apache.slider.common.params.ActionListArgs;
+import org.apache.slider.common.params.ActionStatusArgs;
+import org.apache.slider.common.params.ActionThawArgs;
+import org.apache.slider.common.params.ActionUpdateArgs;
+import org.apache.slider.common.params.ArgOps;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test handling of common arguments, specifically how things get split up.
+ */
+public class TestCommonArgParsing implements SliderActions, Arguments {
+
+
+  public static final String CLUSTERNAME = "clustername";
+
+  @Test
+  public void testCreateActionArgs() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_CREATE,
+        "cluster1"));
+    assertEquals("cluster1", clientArgs.getClusterName());
+  }
+
+  @Test
+  public void testCreateFailsNoClustername() throws Throwable {
+    assertParseFails(Arrays.asList(ACTION_CREATE));
+  }
+
+  @Test
+  public void testCreateFailsTwoClusternames() throws Throwable {
+    assertParseFails(Arrays.asList(
+        ACTION_CREATE,
+        "c1",
+        "c2"
+    ));
+  }
+
+  @Test
+  public void testHelp() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_HELP));
+    assertNull(clientArgs.getClusterName());
+  }
+
+  @Test
+  public void testSliderBasePath() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST,
+        ARG_BASE_PATH,  "/projects/slider/clusters"));
+    assertEquals(new Path("/projects/slider/clusters"),
+        clientArgs.getBasePath());
+  }
+
+  @Test
+  public void testNoSliderBasePath() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
+    assertNull(clientArgs.getBasePath());
+  }
+
+  @Test
+  public void testListNoClusternames() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
+    assertNull(clientArgs.getClusterName());
+  }
+
+  @Test
+  public void testListNoClusternamesDefinition() throws Throwable {
+    ClientArgs clientArgs = createClientArgs(Arrays.asList(
+        ACTION_LIST,
+        ARG_DEFINE,
+        "fs.default.FS=file://localhost"
+        ));
+    assertNull(clientArgs.getClusterName());
+  }
+
+  @Test
+  public void testList1Clustername() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(ACTION_LIST, "cluster1"));
+    assertEquals("cluster1", ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionListArgs);
+  }
+
+  @Test
+  public void testListFailsTwoClusternames() throws Throwable {
+    assertParseFails(Arrays.asList(
+        ACTION_LIST,
+        "c1",
+        "c2"
+      ));
+  }
+
+  @Test
+  public void testDefinitions() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_CREATE,
+        CLUSTERNAME,
+        "-D", "yarn.resourcemanager.principal=yarn/server@LOCAL",
+        "-D", "dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
+    ));
+    Configuration conf = new Configuration(false);
+    ca.applyDefinitions(conf);
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertNull(conf.get(SliderXmlConfKeys.KEY_SLIDER_BASE_PATH));
+    SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
+    SliderUtils.verifyPrincipalSet(
+        conf,
+        SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
+
+  }
+
+  @Test
+  public void testDefinitionsSettingBaseSliderDir() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_CREATE,
+        CLUSTERNAME,
+        "--basepath", "/projects/slider/clusters",
+        "-D", "yarn.resourcemanager.principal=yarn/server@LOCAL",
+        "-D", "dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
+    ));
+    Configuration conf = new Configuration(false);
+    ca.applyDefinitions(conf);
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertEquals("/projects/slider/clusters", conf.get(SliderXmlConfKeys
+        .KEY_SLIDER_BASE_PATH));
+    SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
+    SliderUtils.verifyPrincipalSet(conf, SliderXmlConfKeys
+        .DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
+
+  }
+
+  /**
+   * Test a start command.
+   * @throws Throwable
+   */
+  @Test
+  public void testComplexThaw() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_START,
+        "--manager", "rhel:8032",
+        "--filesystem", "hdfs://rhel:9090",
+        "-S", "java.security.krb5.realm=LOCAL",
+        "-S", "java.security.krb5.kdc=rhel",
+        "-D", "yarn.resourcemanager.principal=yarn/rhel@LOCAL",
+        "-D", "namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
+        "cl1"
+    ));
+    assertEquals("cl1", ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionThawArgs);
+  }
+
+  /**
+   * Test a force kill command where the app comes at the end of the line.
+   * @throws Throwable
+   *
+   */
+  @Test
+  public void testStatusSplit() throws Throwable {
+
+    String appId = "application_1381252124398_0013";
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_STATUS,
+        "--manager", "rhel:8032",
+        "--filesystem", "hdfs://rhel:9090",
+        "-S", "java.security.krb5.realm=LOCAL",
+        "-S", "java.security.krb5.kdc=rhel",
+        "-D", "yarn.resourcemanager.principal=yarn/rhel@LOCAL",
+        "-D", "namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
+        appId
+    ));
+    assertEquals(appId, ca.getClusterName());
+  }
+
+  @Test
+  public void testFreezeFailsNoArg() throws Throwable {
+    assertParseFails(Arrays.asList(
+        ACTION_STOP
+    ));
+  }
+
+  @Test
+  public void testFreezeWorks1Arg() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_STOP,
+        CLUSTERNAME
+    ));
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
+  }
+
+  @Test
+  public void testFreezeFails2Arg() throws Throwable {
+    assertParseFails(Arrays.asList(
+        ACTION_STOP, "cluster", "cluster2"
+    ));
+  }
+
+  @Test
+  public void testFreezeForceWaitAndMessage() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_STOP, CLUSTERNAME,
+        ARG_FORCE,
+        ARG_WAIT, "0",
+        ARG_MESSAGE, "explanation"
+    ));
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
+    ActionFreezeArgs freezeArgs = (ActionFreezeArgs) ca.getCoreAction();
+    assertEquals("explanation", freezeArgs.message);
+    assertTrue(freezeArgs.force);
+  }
+
+  @Test
+  public void testGetStatusWorks1Arg() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_STATUS,
+        CLUSTERNAME
+    ));
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionStatusArgs);
+  }
+
+  @Test
+  public void testExistsWorks1Arg() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_EXISTS,
+        CLUSTERNAME,
+        ARG_LIVE
+    ));
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionExistsArgs);
+    assertTrue(ca.getActionExistsArgs().live);
+  }
+
+  @Test
+  public void testDestroy1Arg() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_DESTROY,
+        CLUSTERNAME
+    ));
+    assertEquals(CLUSTERNAME, ca.getClusterName());
+    assertTrue(ca.getCoreAction() instanceof ActionDestroyArgs);
+  }
+
+  /**
+   * Assert that a pass fails with a BadCommandArgumentsException.
+   * @param argsList
+   */
+
+  private void assertParseFails(List<String> argsList) throws SliderException {
+    try {
+      ClientArgs clientArgs = createClientArgs(argsList);
+      Assert.fail("expected an exception, got " + clientArgs);
+    } catch (BadCommandArgumentsException ignored) {
+      //expected
+    }
+  }
+
+  /**
+   * Build and parse client args, after adding the base args list.
+   * @param argsList
+   */
+  public ClientArgs createClientArgs(List<String> argsList)
+      throws SliderException {
+    ClientArgs serviceArgs = new ClientArgs(argsList);
+    serviceArgs.parse();
+    return serviceArgs;
+  }
+
+  public ActionCreateArgs createAction(List<String> argsList)
+      throws SliderException {
+    ClientArgs ca = createClientArgs(argsList);
+    assertEquals(ACTION_CREATE, ca.getAction());
+    ActionCreateArgs args = ca.getActionCreateArgs();
+    assertNotNull(args);
+    return args;
+  }
+
+  @Test
+  public void testSingleRoleArg() throws Throwable {
+    ActionCreateArgs createArgs = createAction(Arrays.asList(
+        ACTION_CREATE, "cluster1",
+        ARG_COMPONENT, "master", "5"
+    ));
+    List<String> tuples = createArgs.getComponentTuples();
+    assertEquals(2, tuples.size());
+    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+    assertEquals("5", roleMap.get("master"));
+  }
+
+  @Test
+  public void testNoRoleArg() throws Throwable {
+    ActionCreateArgs createArgs = createAction(Arrays.asList(
+        ACTION_CREATE, "cluster1"
+    ));
+    List<String> tuples = createArgs.getComponentTuples();
+    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+    assertNull(roleMap.get("master"));
+  }
+
+
+  @Test
+  public void testMultiRoleArgBuild() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_BUILD, "cluster1",
+        ARG_COMPONENT, "master", "1",
+        ARG_COMPONENT, "worker", "2"
+    ));
+    assertEquals(ACTION_BUILD, ca.getAction());
+    assertTrue(ca.getCoreAction() instanceof ActionBuildArgs);
+    assertTrue(ca.getBuildingActionArgs() instanceof ActionBuildArgs);
+    AbstractClusterBuildingActionArgs args = ca.getActionBuildArgs();
+    List<String> tuples = args.getComponentTuples();
+    assertEquals(4, tuples.size());
+    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+    assertEquals("1", roleMap.get("master"));
+    assertEquals("2", roleMap.get("worker"));
+  }
+
+  @Test
+  public void testArgUpdate() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_UPDATE, "cluster1",
+        ARG_APPDEF, "app.json"
+    ));
+    assertEquals(ACTION_UPDATE, ca.getAction());
+    assertTrue(ca.getCoreAction() instanceof ActionUpdateArgs);
+    assertTrue(ca.getActionUpdateArgs() instanceof ActionUpdateArgs);
+    AbstractClusterBuildingActionArgs args = ca.getActionUpdateArgs();
+    assertNotNull(args.appDef);
+  }
+
+  @Test
+  public void testFlexArgs() throws Throwable {
+    ClientArgs ca = createClientArgs(Arrays.asList(
+        ACTION_FLEX, "cluster1",
+        ARG_COMPONENT, "master", "1",
+        ARG_COMPONENT, "worker", "2"
+    ));
+    assertTrue(ca.getCoreAction() instanceof ActionFlexArgs);
+    List<String> tuples = ca.getActionFlexArgs().getComponentTuples();
+    assertEquals(4, tuples.size());
+    Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+    assertEquals("1", roleMap.get("master"));
+    assertEquals("2", roleMap.get("worker"));
+  }
+
+  @Test
+  public void testDuplicateRole() throws Throwable {
+    ActionCreateArgs createArgs = createAction(Arrays.asList(
+        ACTION_CREATE, "cluster1",
+        ARG_COMPONENT, "master", "1",
+        ARG_COMPONENT, "master", "2"
+    ));
+    List<String> tuples = createArgs.getComponentTuples();
+    assertEquals(4, tuples.size());
+    try {
+      Map<String, String> roleMap = ArgOps.convertTupleListToMap(
+          "roles",
+          tuples);
+      Assert.fail("got a role map " + roleMap + " not a failure");
+    } catch (BadCommandArgumentsException expected) {
+      assertTrue(expected.getMessage().contains(ErrorStrings
+          .ERROR_DUPLICATE_ENTRY));
+    }
+  }
+
+  @Test
+  public void testOddRoleCount() throws Throwable {
+    ActionCreateArgs createArgs = createAction(Arrays.asList(
+        ACTION_CREATE, "cluster1",
+        ARG_COMPONENT, "master", "1",
+        ARG_COMPONENT, "master", "2"
+    ));
+    List<String> tuples = createArgs.getComponentTuples();
+    tuples.add("loggers");
+    assertEquals(5, tuples.size());
+    try {
+      Map<String, String> roleMap = ArgOps.convertTupleListToMap("roles",
+          tuples);
+      Assert.fail("got a role map " + roleMap + " not a failure");
+    } catch (BadCommandArgumentsException expected) {
+      assertTrue(expected.getMessage().contains(ErrorStrings
+          .ERROR_PARSE_FAILURE));
+    }
+  }
+
+  /**
+   * Create some role-opt client args, so that multiple tests can use it.
+   * @return the args
+   */
+  public ActionCreateArgs createRoleOptClientArgs() throws SliderException {
+    ActionCreateArgs createArgs = createAction(Arrays.asList(
+        ACTION_CREATE, "cluster1",
+        ARG_COMPONENT, "master", "1",
+        ARG_COMP_OPT, "master", "cheese", "swiss",
+        ARG_COMP_OPT, "master", "env.CHEESE", "cheddar",
+        ARG_COMP_OPT, "master", ResourceKeys.YARN_CORES, "3",
+
+        ARG_COMPONENT, "worker", "2",
+        ARG_COMP_OPT, "worker", ResourceKeys.YARN_CORES, "2",
+        ARG_COMP_OPT, "worker", RoleKeys.JVM_HEAP, "65536",
+        ARG_COMP_OPT, "worker", "env.CHEESE", "stilton"
+    ));
+    return createArgs;
+  }
+
+  @Test
+  public void testRoleOptionParse() throws Throwable {
+    ActionCreateArgs createArgs = createRoleOptClientArgs();
+    Map<String, Map<String, String>> tripleMaps = createArgs.getCompOptionMap();
+    Map<String, String> workerOpts = tripleMaps.get("worker");
+    assertEquals(3, workerOpts.size());
+    assertEquals("2", workerOpts.get(ResourceKeys.YARN_CORES));
+    assertEquals("65536", workerOpts.get(RoleKeys.JVM_HEAP));
+
+    Map<String, String> masterOpts = tripleMaps.get("master");
+    assertEquals(3, masterOpts.size());
+    assertEquals("3", masterOpts.get(ResourceKeys.YARN_CORES));
+
+  }
+
+  @Test
+  public void testRoleOptionsMerge() throws Throwable {
+    ActionCreateArgs createArgs = createRoleOptClientArgs();
+
+    Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
+
+    Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
+    SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
+
+    Map<String, String> masterOpts = clusterRoleMap.get("master");
+    assertEquals("swiss", masterOpts.get("cheese"));
+
+    Map<String, String> workerOpts = clusterRoleMap.get("worker");
+    assertEquals("stilton", workerOpts.get("env.CHEESE"));
+  }
+
+  @Test
+  public void testEnvVariableApply() throws Throwable {
+    ActionCreateArgs createArgs = createRoleOptClientArgs();
+
+
+    Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
+
+    Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
+    SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
+
+    Map<String, String> workerOpts = clusterRoleMap.get("worker");
+    assertEquals("stilton", workerOpts.get("env.CHEESE"));
+
+    Map<String, String> envmap = SliderUtils.buildEnvMap(workerOpts);
+    assertEquals("stilton", envmap.get("CHEESE"));
+
+  }
+
+  /**
+   * Static compiler complaining about matching LinkedHashMap with Map,
+   * so some explicit creation here.
+   * @return a map of maps
+   */
+  public Map<String, Map<String, String>> createEnvMap() {
+
+    Map<String, String> cheese = new HashMap<>();
+    cheese.put("cheese", "french");
+    Map<String, String> envCheese = new HashMap<>();
+    envCheese.put("env.CHEESE", "french");
+    Map<String, Map<String, String>> envMap = new HashMap<>();
+    envMap.put("master", cheese);
+    envMap.put("worker", envCheese);
+    return envMap;
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
new file mode 100644
index 0000000..07d8c10
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
@@ -0,0 +1,405 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Test a keytab installation.
+ */
+public class TestKeytabCommandOptions extends SliderTestBase {
+
+  private static SliderFileSystem testFileSystem;
+
+  @Before
+  public void setupFilesystem() throws IOException {
+    org.apache.hadoop.fs.FileSystem fileSystem = new RawLocalFileSystem();
+    YarnConfiguration configuration = SliderUtils.createConfiguration();
+    fileSystem.setConf(configuration);
+    testFileSystem = new SliderFileSystem(fileSystem, configuration);
+    File testFolderDir = new File(testFileSystem
+        .buildKeytabInstallationDirPath("").toUri().getPath());
+    FileUtils.deleteDirectory(testFolderDir);
+  }
+
+  @Test
+  public void testInstallKeytab() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    ServiceLauncher launcher = launch(TestSliderClient.class,
+                                      conf,
+                                      Arrays.asList(
+                                          ClientArgs.ACTION_KEYTAB,
+                                          ClientArgs.ARG_KEYTABINSTALL,
+                                          ClientArgs.ARG_KEYTAB,
+                                          localKeytab.getAbsolutePath(),
+                                          Arguments.ARG_FOLDER,
+                                          "testFolder"));
+    Path installedPath = new Path(testFileSystem
+        .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+    File installedKeytab = new File(installedPath.toUri().getPath());
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab),
+        FileUtils.readFileToString(localKeytab));
+  }
+
+  @Test
+  public void testInstallThenDeleteKeytab() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    ServiceLauncher launcher = launch(TestSliderClient.class,
+                                      conf,
+                                      Arrays.asList(
+                                          ClientArgs.ACTION_KEYTAB,
+                                          ClientArgs.ARG_KEYTABINSTALL,
+                                          ClientArgs.ARG_KEYTAB,
+                                          localKeytab.getAbsolutePath(),
+                                          Arguments.ARG_FOLDER,
+                                          "testFolder"));
+    Path installedPath = new Path(testFileSystem
+        .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+    File installedKeytab = new File(installedPath.toUri().getPath());
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab),
+        FileUtils.readFileToString(localKeytab));
+
+    launcher = launch(TestSliderClient.class,
+                      conf,
+                      Arrays.asList(
+                          ClientArgs.ACTION_KEYTAB,
+                          ClientArgs.ARG_KEYTABDELETE,
+                          ClientArgs.ARG_KEYTAB,
+                          localKeytab.getName(),
+                          Arguments.ARG_FOLDER,
+                          "testFolder"));
+
+    assertFalse(installedKeytab.exists());
+
+  }
+
+  @Test
+  public void testInstallThenListKeytab() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    ServiceLauncher launcher = launch(TestSliderClient.class,
+                                      conf,
+                                      Arrays.asList(
+                                          ClientArgs.ACTION_KEYTAB,
+                                          ClientArgs.ARG_KEYTABINSTALL,
+                                          ClientArgs.ARG_KEYTAB,
+                                          localKeytab.getAbsolutePath(),
+                                          Arguments.ARG_FOLDER,
+                                          "testFolder"));
+    Path installedPath = new Path(testFileSystem
+        .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+    File installedKeytab = new File(installedPath.toUri().getPath());
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab),
+        FileUtils.readFileToString(localKeytab));
+
+    // install an additional copy into another folder to test listing
+    launcher = launch(TestSliderClient.class,
+                      conf,
+                      Arrays.asList(
+                          ClientArgs.ACTION_KEYTAB,
+                          ClientArgs.ARG_KEYTABINSTALL,
+                          ClientArgs.ARG_KEYTAB,
+                          localKeytab.getAbsolutePath(),
+                          Arguments.ARG_FOLDER,
+                          "testFolder2"));
+
+    TestAppender testAppender = new TestAppender();
+
+    Logger.getLogger(SliderClient.class).addAppender(testAppender);
+
+    try {
+      launcher = launch(TestSliderClient.class,
+                        conf,
+                        Arrays.asList(
+                            ClientArgs.ACTION_KEYTAB,
+                            ClientArgs.ARG_KEYTABLIST)
+      );
+      assertEquals(3, testAppender.events.size());
+      String msg = (String) testAppender.events.get(1).getMessage();
+      assertTrue(msg.contains("/.slider/keytabs/testFolder"));
+      assertTrue(msg.endsWith(installedKeytab.getName()));
+      msg = (String) testAppender.events.get(2).getMessage();
+      assertTrue(msg.contains("/.slider/keytabs/testFolder"));
+      assertTrue(msg.endsWith(installedKeytab.getName()));
+    } finally {
+      Logger.getLogger(SliderClient.class).removeAppender(testAppender);
+    }
+
+    // now listing while specifying the folder name
+    testAppender = new TestAppender();
+
+    Logger.getLogger(SliderClient.class).addAppender(testAppender);
+
+    try {
+      launcher = launch(TestSliderClient.class,
+                        conf,
+                        Arrays.asList(
+                            ClientArgs.ACTION_KEYTAB,
+                            ClientArgs.ARG_KEYTABLIST,
+                            Arguments.ARG_FOLDER,
+                            "testFolder"));
+      assertEquals(2, testAppender.events.size());
+      String msg = (String) testAppender.events.get(1).getMessage();
+      assertTrue(msg.contains("/.slider/keytabs/testFolder/" +
+          installedKeytab.getName()));
+    } finally {
+      Logger.getLogger(SliderClient.class).removeAppender(testAppender);
+    }
+  }
+
+  @Test
+  public void testDeleteNonExistentKeytab() throws Throwable {
+    // create a mock keytab file
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    try {
+      ServiceLauncher launcher = launch(TestSliderClient.class,
+                                        conf,
+                                        Arrays.asList(
+                                            ClientArgs.ACTION_KEYTAB,
+                                            ClientArgs.ARG_KEYTABDELETE,
+                                            ClientArgs.ARG_KEYTAB,
+                                            "HeyIDontExist.keytab",
+                                            Arguments.ARG_FOLDER,
+                                            "testFolder"));
+      fail("expected BadCommandArgumentsException from launch");
+    } catch (BadCommandArgumentsException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testInstallKeytabWithNoFolder() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    try {
+      ServiceLauncher launcher = launch(TestSliderClient.class,
+                                        conf,
+                                        Arrays.asList(
+                                            ClientArgs.ACTION_KEYTAB,
+                                            ClientArgs.ARG_KEYTABINSTALL,
+                                            ClientArgs.ARG_KEYTAB,
+                                            localKeytab.getAbsolutePath()));
+      fail("expected BadCommandArgumentsException from launch");
+    } catch (BadCommandArgumentsException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testInstallKeytabWithNoKeytab() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    try {
+      ServiceLauncher launcher = launch(TestSliderClient.class,
+                                        conf,
+                                        Arrays.asList(
+                                            ClientArgs.ACTION_KEYTAB,
+                                            ClientArgs.ARG_KEYTABINSTALL,
+                                            ClientArgs.ARG_FOLDER,
+                                            "testFolder"));
+      fail("expected BadCommandArgumentsException from launch");
+    } catch (BadCommandArgumentsException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testInstallKeytabAllowingOverwrite() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    ServiceLauncher launcher = launch(TestSliderClient.class,
+                                      conf,
+                                      Arrays.asList(
+                                          ClientArgs.ACTION_KEYTAB,
+                                          ClientArgs.ARG_KEYTABINSTALL,
+                                          ClientArgs.ARG_KEYTAB,
+                                          localKeytab.getAbsolutePath(),
+                                          Arguments.ARG_FOLDER,
+                                          "testFolder"));
+    Path installedPath = new Path(testFileSystem
+        .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+    File installedKeytab = new File(installedPath.toUri().getPath());
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab), FileUtils
+        .readFileToString(localKeytab));
+    launcher = launch(TestSliderClient.class,
+                      conf,
+                      Arrays.asList(
+                          ClientArgs.ACTION_KEYTAB,
+                          ClientArgs.ARG_KEYTABINSTALL,
+                          ClientArgs.ARG_KEYTAB,
+                          localKeytab.getAbsolutePath(),
+                          Arguments.ARG_FOLDER,
+                          "testFolder",
+                          Arguments.ARG_OVERWRITE)
+    );
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab),
+        FileUtils.readFileToString(localKeytab));
+  }
+
+  @Test
+  public void testInstallKeytabNotAllowingOverwrite() throws Throwable {
+    // create a mock keytab file
+    File localKeytab =
+        FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+    String contents = UUID.randomUUID().toString();
+    FileUtils.write(localKeytab, contents);
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    ServiceLauncher launcher = launch(TestSliderClient.class,
+                                      conf,
+                                      Arrays.asList(
+                                          ClientArgs.ACTION_KEYTAB,
+                                          ClientArgs.ARG_KEYTABINSTALL,
+                                          ClientArgs.ARG_KEYTAB,
+                                          localKeytab.getAbsolutePath(),
+                                          Arguments.ARG_FOLDER,
+                                          "testFolder"));
+    Path installedPath = new Path(testFileSystem
+        .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+    File installedKeytab = new File(installedPath.toUri().getPath());
+    assertTrue(installedKeytab.exists());
+    assertEquals(FileUtils.readFileToString(installedKeytab),
+        FileUtils.readFileToString(localKeytab));
+    try {
+      launcher = launch(TestSliderClient.class,
+                        conf,
+                        Arrays.asList(
+                            ClientArgs.ACTION_KEYTAB,
+                            ClientArgs.ARG_KEYTABINSTALL,
+                            ClientArgs.ARG_KEYTAB,
+                            localKeytab.getAbsolutePath(),
+                            Arguments.ARG_FOLDER,
+                            "testFolder"));
+      fail("expected BadCommandArgumentsException from launch");
+    } catch (BadCommandArgumentsException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testInstallKeytabWithMissingKeytab() throws Throwable {
+    // create a mock keytab file
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    try {
+      ServiceLauncher launcher = launch(TestSliderClient.class,
+                                        conf,
+                                        Arrays.asList(
+                                            ClientArgs.ACTION_KEYTAB,
+                                            ClientArgs.ARG_KEYTABINSTALL,
+                                            ClientArgs.ARG_KEYTAB,
+                                            "HeyIDontExist.keytab",
+                                            Arguments.ARG_FOLDER,
+                                            "testFolder"));
+      fail("expected BadCommandArgumentsException from launch");
+    } catch (BadCommandArgumentsException e) {
+      // expected
+    }
+  }
+
+  private File getTempLocation() {
+    return new File(System.getProperty("user.dir") + "/target");
+  }
+
+  /**
+   * Test SliderClient with overridden filesystem.
+   */
+  public static class TestSliderClient extends SliderClient {
+    public TestSliderClient() {
+      super();
+    }
+
+    @Override
+    protected void initHadoopBinding() throws IOException, SliderException {
+      // skip the real Hadoop/HDFS binding and substitute the shared test
+      // filesystem so keytab operations stay local to the test run
+      sliderFileSystem = testFileSystem;
+    }
+
+  }
+
+  /**
+   * Appender that captures logging events.
+   */
+  public static class TestAppender extends AppenderSkeleton {
+    private List<LoggingEvent> events = new ArrayList<>();
+
+    public void close() {}
+
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(LoggingEvent event) {
+      events.add(event);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java
new file mode 100644
index 0000000..32208ab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.server.appmaster.model.mock.MockApplicationId;
+import org.apache.slider.utils.SliderTestBase;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Test slider client methods.
+ */
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(SliderUtils.class)
+public class TestSliderClientMethods extends SliderTestBase {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(TestSliderClientMethods.class);
+
+  static final String AM_ENV = "LD_LIBRARY_PATH";
+  static final String PLACEHOLDER_KEY = "${distro.version}";
+  static final String PLACEHOLDER_SYSTEM_KEY = "DISTRO_VERSION";
+  static final String PLACEHOLDER_VALUE = "1.0.0";
+  static final String AM_ENV_2 = "PATH";
+  static final String PLACEHOLDER_KEY_2 = "${native.version}";
+  static final String PLACEHOLDER_SYSTEM_KEY_2 = "NATIVE_VERSION";
+  static final String PLACEHOLDER_VALUE_2 = "2.0.0";
+
+  @Test
+  public void testGeneratePlaceholderKeyValueMap() throws Throwable {
+    TestSliderClient testSliderClient = new TestSliderClient();
+
+    PowerMock.mockStatic(System.class);
+    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+      .andReturn(PLACEHOLDER_VALUE).anyTimes();
+    PowerMock.replayAll();
+
+    Map<String, String> placeholders = testSliderClient
+        .generatePlaceholderKeyValueMap(AM_ENV + "=/usr/lib/" +
+            PLACEHOLDER_KEY);
+    Assert.assertTrue(placeholders.containsKey(PLACEHOLDER_KEY));
+    Assert.assertEquals("Should be equal", PLACEHOLDER_VALUE,
+        placeholders.get(PLACEHOLDER_KEY));
+
+    PowerMock.verifyAll();
+    LOG.info("Placeholders = {}", placeholders);
+  }
+
+  @Test
+  public void testSetAmLaunchEnv() throws Throwable {
+    TestSliderClient testSliderClient = new TestSliderClient();
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
+        + PLACEHOLDER_KEY);
+
+    PowerMock.mockStatic(System.class);
+    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+        .andReturn(PLACEHOLDER_VALUE);
+    PowerMock.replayAll();
+
+    Map<String, String> amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
+    Assert.assertNotNull(amLaunchEnv);
+    Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
+    Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV),
+        (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
+            "/usr/lib/" + PLACEHOLDER_VALUE);
+
+    PowerMock.verifyAll();
+    LOG.info("amLaunchEnv = {}", amLaunchEnv);
+  }
+
+  @Test
+  public void testSetAmLaunchEnvMulti() throws Throwable {
+    TestSliderClient testSliderClient = new TestSliderClient();
+    YarnConfiguration conf = SliderUtils.createConfiguration();
+    conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
+        + PLACEHOLDER_KEY + "," + AM_ENV_2 + "=/usr/bin/" + PLACEHOLDER_KEY_2);
+
+    PowerMock.mockStatic(System.class);
+    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+        .andReturn(PLACEHOLDER_VALUE);
+    EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY_2))
+        .andReturn(PLACEHOLDER_VALUE_2);
+    PowerMock.replayAll();
+
+    Map<String, String> amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
+    Assert.assertNotNull(amLaunchEnv);
+    Assert.assertEquals("Should have 2 envs", amLaunchEnv.size(), 2);
+    Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
+    Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV),
+        (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
+            "/usr/lib/" + PLACEHOLDER_VALUE);
+    Assert.assertNotNull(amLaunchEnv.get(AM_ENV_2));
+    Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV_2),
+        (Shell.WINDOWS ? "%" + AM_ENV_2 + "%;" : "$" + AM_ENV_2 + ":") +
+            "/usr/bin/" + PLACEHOLDER_VALUE_2);
+
+    PowerMock.verifyAll();
+    LOG.info("amLaunchEnv = " + amLaunchEnv);
+  }
+
+  static class TestSliderClient extends SliderClient {
+    @Override
+    public ApplicationId submitApplication(ApplicationSubmissionContext
+        context)
+        throws YarnException, IOException {
+      return new MockApplicationId(1);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java
new file mode 100644
index 0000000..f649ab7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.ActionTokensArgs;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.NotFoundException;
+import org.apache.slider.utils.SliderTestBase;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+/**
+ * Test the argument parsing/validation logic.
+ */
+public class TestSliderTokensCommand extends SliderTestBase {
+
+  private static YarnConfiguration config = createTestConfig();
+
+  public static YarnConfiguration createTestConfig() {
+    YarnConfiguration configuration = new YarnConfiguration();
+    configuration.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032");
+    return configuration;
+  }
+
+  @Test
+  public void testBadSourceArgs() throws Throwable {
+    launchExpectingException(SliderClient.class,
+        config,
+        ActionTokensArgs.DUPLICATE_ARGS,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_SOURCE, "target/tokens.bin",
+            Arguments.ARG_OUTPUT, "target/tokens.bin"
+        ));
+  }
+
+  @Test
+  public void testKTNoPrincipal() throws Throwable {
+    launchExpectingException(SliderClient.class,
+        config,
+        ActionTokensArgs.MISSING_KT_PROVIDER,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_KEYTAB, "target/keytab"
+        ));
+  }
+
+  @Test
+  public void testPrincipalNoKT() throws Throwable {
+    launchExpectingException(SliderClient.class,
+        config,
+        ActionTokensArgs.MISSING_KT_PROVIDER,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_PRINCIPAL, "bob@REALM"
+        ));
+  }
+
+  /**
+   * A missing keytab is an error.
+   * @throws Throwable
+   */
+  @Test
+  public void testMissingKT() throws Throwable {
+    Throwable ex = launchExpectingException(SliderClient.class,
+        config,
+        TokensOperation.E_NO_KEYTAB,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_PRINCIPAL, "bob@REALM",
+            Arguments.ARG_KEYTAB, "target/keytab"
+        ));
+    if (!(ex instanceof NotFoundException)) {
+      throw ex;
+    }
+  }
+
+  @Test
+  public void testMissingSourceFile() throws Throwable {
+    Throwable ex = launchExpectingException(SliderClient.class,
+        config,
+        TokensOperation.E_MISSING_SOURCE_FILE,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_SOURCE, "target/tokens.bin"
+        ));
+    if (!(ex instanceof NotFoundException)) {
+      throw ex;
+    }
+  }
+
+  @Test
+  public void testListHarmlessWhenInsecure() throws Throwable {
+    execSliderCommand(0, config, Arrays.asList(SliderActions.ACTION_TOKENS));
+  }
+
+  @Test
+  public void testCreateFailsWhenInsecure() throws Throwable {
+    Throwable ex = launchExpectingException(SliderClient.class,
+        config,
+        TokensOperation.E_INSECURE,
+        Arrays.asList(SliderActions.ACTION_TOKENS,
+            Arguments.ARG_OUTPUT, "target/tokens.bin"
+        ));
+    if (!(ex instanceof BadClusterStateException)) {
+      throw ex;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java
new file mode 100644
index 0000000..efd0c2f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test cluster name validation.
+ */
+public class TestClusterNames {
+
+  void assertValidName(String name) {
+    boolean valid = SliderUtils.isClusternameValid(name);
+    Assert.assertTrue("Clustername '" + name + "' mistakenly declared invalid",
+                      valid);
+  }
+
+  void assertInvalidName(String name) {
+    boolean valid = SliderUtils.isClusternameValid(name);
+    Assert.assertFalse("Clustername '\" + name + \"' mistakenly declared valid",
+                       valid);
+  }
+
+  void assertInvalid(List<String> names) {
+    for (String name : names) {
+      assertInvalidName(name);
+    }
+  }
+
+  void assertValid(List<String> names) {
+    for (String name : names) {
+      assertValidName(name);
+    }
+  }
+
+  @Test
+  public void testEmptyName() throws Throwable {
+    assertInvalidName("");
+  }
+
+  @Test
+  public void testSpaceName() throws Throwable {
+    assertInvalidName(" ");
+  }
+
+
+  @Test
+  public void testLeadingHyphen() throws Throwable {
+    assertInvalidName("-hyphen");
+  }
+
+  @Test
+  public void testTitleLetters() throws Throwable {
+    assertInvalidName("Title");
+  }
+
+  @Test
+  public void testCapitalLetters() throws Throwable {
+    assertInvalidName("UPPER-CASE-CLUSTER");
+  }
+
+  @Test
+  public void testInnerBraced() throws Throwable {
+    assertInvalidName("a[a");
+  }
+
+  @Test
+  public void testLeadingBrace() throws Throwable {
+    assertInvalidName("[");
+  }
+
+  @Test
+  public void testNonalphaLeadingChars() throws Throwable {
+    assertInvalid(Arrays.asList(
+        "[a", "#", "@", "=", "*", "."
+    ));
+  }
+
+  @Test
+  public void testNonalphaInnerChars() throws Throwable {
+    assertInvalid(Arrays.asList(
+        "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i"
+    ));
+  }
+
+  @Test
+  public void testClusterValid() throws Throwable {
+    assertValidName("cluster");
+  }
+
+  @Test
+  public void testValidNames() throws Throwable {
+    assertValid(Arrays.asList(
+        "cluster",
+        "cluster1",
+        "very-very-very-long-cluster-name",
+        "c1234567890"
+    ));
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java
new file mode 100644
index 0000000..45c6118
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.utils.YarnMiniClusterTestBase;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.Map;
+
+/**
+ * Test config helper.
+ */
+public class TestConfigHelper extends YarnMiniClusterTestBase {
+
+  @Test
+  public void testConfigLoaderIteration() throws Throwable {
+
+    String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" " +
+        "standalone=\"no\"?><configuration><property><name>key</name>" +
+        "<value>value</value><source>programatically</source></property>" +
+        "</configuration>";
+    InputStream ins = new ByteArrayInputStream(xml.getBytes("UTF8"));
+    Configuration conf = new Configuration(false);
+    conf.addResource(ins);
+    Configuration conf2 = new Configuration(false);
+    for (Map.Entry<String, String> entry : conf) {
+      conf2.set(entry.getKey(), entry.getValue(), "src");
+    }
+
+  }
+
+  @Test
+  public void testConfigDeprecation() throws Throwable {
+    ConfigHelper.registerDeprecatedConfigItems();
+    Configuration conf = new Configuration(false);
+    // test deprecated items here
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java
new file mode 100644
index 0000000..f9a58d4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.utils.YarnMiniClusterTestBase;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URI;
+
+/**
+ * Test config helper loading configuration from HDFS.
+ */
+public class TestConfigHelperHDFS extends YarnMiniClusterTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestConfigHelperHDFS.class);
+
+  @Test
+  public void testConfigHelperHDFS() throws Throwable {
+    YarnConfiguration config = getConfiguration();
+    createMiniHDFSCluster("testConfigHelperHDFS", config);
+
+    Configuration conf = new Configuration(false);
+    conf.set("key", "value");
+    URI fsURI = new URI(getFsDefaultName());
+    Path root = new Path(fsURI);
+    Path confPath = new Path(root, "conf.xml");
+    FileSystem dfs = FileSystem.get(fsURI, config);
+    ConfigHelper.saveConfig(dfs, confPath, conf);
+    //load time
+    Configuration loaded = ConfigHelper.loadConfiguration(dfs, confPath);
+    LOG.info(ConfigHelper.dumpConfigToString(loaded));
+    assertEquals("value", loaded.get("key"));
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: YARN-6395. Integrate service app master to write data into ATSv2. Contributed by Rohith Sharma K S

Posted by ji...@apache.org.
YARN-6395. Integrate service app master to write data into ATSv2. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f9ea483
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f9ea483
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f9ea483

Branch: refs/heads/yarn-native-services
Commit: 2f9ea483d5102f2ac03699171814d84a03f64a84
Parents: c0536f1
Author: Jian He <ji...@apache.org>
Authored: Thu Mar 30 15:58:51 2017 +0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:24 2017 -0700

----------------------------------------------------------------------
 .../server/appmaster/SliderAppMaster.java       |  63 ++++
 .../slider/server/appmaster/state/AppState.java |  11 +
 .../ServiceTimelinePublisher.java               | 365 +++++++++++++++++++
 .../timelineservice/SliderMetricsSink.java      | 102 ++++++
 .../SliderTimelineEntityType.java               |  39 ++
 .../timelineservice/SliderTimelineEvent.java    |  34 ++
 .../SliderTimelineMetricsConstants.java         |  91 +++++
 .../appmaster/timelineservice/package-info.java |  27 ++
 .../TestServiceTimelinePublisher.java           | 285 +++++++++++++++
 .../appmaster/timelineservice/package-info.java |  26 ++
 10 files changed, 1043 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 4922c2d..4fa2769 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
@@ -146,6 +147,8 @@ import org.apache.slider.server.appmaster.state.ContainerAssignment;
 import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
 import org.apache.slider.server.appmaster.state.ProviderAppState;
 import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
+import org.apache.slider.server.appmaster.timelineservice.SliderMetricsSink;
 import org.apache.slider.server.appmaster.web.SliderAMWebApp;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.apache.slider.server.appmaster.web.WebAppApiImpl;
@@ -240,6 +243,13 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private AMRMClientAsync asyncRMClient;
 
+  /** Handle to communicate with the timeline service */
+  private TimelineClient timelineClient;
+
+  private boolean timelineServiceEnabled = false;
+
+  ServiceTimelinePublisher serviceTimelinePublisher;
+
   @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized")
   private RMOperationHandler rmOperationHandler;
 
@@ -483,6 +493,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     addService(executorService);
 
     addService(actionQueues);
+    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
+      timelineServiceEnabled = true;
+      log.info("Enabled YARN timeline service v2. ");
+    }
 
     //init all child services
     super.serviceInit(conf);
@@ -650,6 +664,20 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       //now bring it up
       deployChildService(asyncRMClient);
 
+      if (timelineServiceEnabled) {
+        timelineClient = TimelineClient.createTimelineClient(appid);
+        asyncRMClient.registerTimelineClient(timelineClient);
+        timelineClient.init(getConfig());
+        timelineClient.start();
+        log.info("Timeline client started.");
+
+        serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient);
+        serviceTimelinePublisher.init(getConfig());
+        serviceTimelinePublisher.start();
+        appState.setServiceTimelinePublisher(serviceTimelinePublisher);
+        log.info("ServiceTimelinePublisher started.");
+      }
+
 
       // nmclient relays callbacks back to this class
       nmClientAsync = new NMClientAsyncImpl("nmclient", this);
@@ -781,6 +809,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
       liveContainers = amRegistrationData.getContainersFromPreviousAttempts();
       DefaultMetricsSystem.initialize("SliderAppMaster");
+      if (timelineServiceEnabled) {
+        DefaultMetricsSystem.instance().register("SliderMetricsSink",
+            "For processing metrics to ATS",
+            new SliderMetricsSink(serviceTimelinePublisher));
+        log.info("SliderMetricsSink registered.");
+      }
 
       //determine the location for the role history data
       Path historyDir = new Path(appDir, HISTORY_DIR_NAME);
@@ -1132,6 +1166,9 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
           yarnRegistryOperations.getSelfRegistrationPath(),
           true);
     }
+    if (timelineServiceEnabled) {
+      serviceTimelinePublisher.serviceAttemptRegistered(appState);
+    }
   }
 
   /**
@@ -1184,6 +1221,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     container.setState(org.apache.slider.api.resource.ContainerState.INIT);
     container.setBareHost(instance.host);
     instance.providerRole.component.addContainer(container);
+
+    if (timelineServiceEnabled) {
+      serviceTimelinePublisher.componentInstanceStarted(container,
+          instance.providerRole.component.getName());
+    }
     return true;
   }
 
@@ -1345,6 +1387,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     releaseAllContainers(application);
     DefaultMetricsSystem.shutdown();
 
+    if (timelineServiceEnabled) {
+      serviceTimelinePublisher.serviceAttemptUnregistered(appState, stopAction);
+      serviceTimelinePublisher.stop();
+      timelineClient.stop();
+    }
+
     // When the application completes, it should send a finish application
     // signal to the RM
     log.info("Application completed. Signalling finish to RM");
@@ -1490,6 +1538,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       if(!result.unknownNode) {
         queue(new UnregisterComponentInstance(containerId, 0,
             TimeUnit.MILLISECONDS));
+        if (timelineServiceEnabled && result.roleInstance != null) {
+          serviceTimelinePublisher
+              .componentInstanceFinished(result.roleInstance);
+        }
       }
     }
 
@@ -1967,6 +2019,17 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
         nmClientAsync.getContainerStatusAsync(containerId,
             cinfo.container.getNodeId());
       }
+    } else if (timelineServiceEnabled) {
+      RoleInstance instance = appState.getOwnedContainer(containerId);
+      if (instance != null) {
+        org.apache.slider.api.resource.Container container =
+            instance.providerRole.component
+                .getContainer(containerId.toString());
+        if (container != null) {
+          serviceTimelinePublisher.componentInstanceUpdated(container,
+              instance.providerRole.component.getName());
+        }
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index e891a27..84b8140 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -64,6 +64,7 @@ import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
 import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
 import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
 import org.apache.slider.server.appmaster.operations.UpdateBlacklistOperation;
+import org.apache.slider.server.appmaster.timelineservice.ServiceTimelinePublisher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -207,6 +208,8 @@ public class AppState {
   private Resource maxResource;
 
   private SliderMetrics appMetrics;
+
+  private ServiceTimelinePublisher serviceTimelinePublisher;
   /**
    * Create an instance
    * @param recordFactory factory for YARN records
@@ -1762,6 +1765,10 @@ public class AppState {
         log.info("Releasing container. Log: " + url);
         try {
           containerReleaseSubmitted(possible);
+          // update during finish call
+          if (serviceTimelinePublisher != null) {
+            serviceTimelinePublisher.componentInstanceFinished(instance);
+          }
         } catch (SliderInternalStateException e) {
           log.warn("when releasing container {} :", possible, e);
         }
@@ -1948,4 +1955,8 @@ public class AppState {
     }
     return naming;
   }
+
+  public void setServiceTimelinePublisher(ServiceTimelinePublisher serviceTimelinePublisher) {
+    this.serviceTimelinePublisher = serviceTimelinePublisher;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
new file mode 100644
index 0000000..3ff4200
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/ServiceTimelinePublisher.java
@@ -0,0 +1,365 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ConfigFile;
+import org.apache.slider.api.resource.Configuration;
+import org.apache.slider.api.resource.Container;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.server.appmaster.actions.ActionStopSlider;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A single service that publishes all the Timeline Entities.
+ */
+public class ServiceTimelinePublisher extends CompositeService {
+
+  // Number of bytes of config which can be published in one shot to ATSv2.
+  public static final int ATS_CONFIG_PUBLISH_SIZE_BYTES = 10 * 1024;
+
+  private TimelineClient timelineClient;
+
+  private volatile boolean stopped = false;
+
+  private static final Logger log =
+      LoggerFactory.getLogger(ServiceTimelinePublisher.class);
+
+  @Override
+  protected void serviceStop() throws Exception {
+    stopped = true;
+  }
+
+  public boolean isStopped() {
+    return stopped;
+  }
+
+  public ServiceTimelinePublisher(TimelineClient client) {
+    super(ServiceTimelinePublisher.class.getName());
+    timelineClient = client;
+  }
+
+  public void serviceAttemptRegistered(AppState appState) {
+    Application application = appState.getClusterStatus();
+    long currentTimeMillis = application.getLaunchTime() == null
+        ? System.currentTimeMillis() : application.getLaunchTime().getTime();
+
+    TimelineEntity entity = createServiceAttemptEntity(application.getId());
+    entity.setCreatedTime(currentTimeMillis);
+
+    // create info keys
+    Map<String, Object> entityInfos = new HashMap<String, Object>();
+    entityInfos.put(SliderTimelineMetricsConstants.NAME, application.getName());
+    entityInfos.put(SliderTimelineMetricsConstants.STATE,
+        application.getState().toString());
+    entityInfos.put(SliderTimelineMetricsConstants.LAUNCH_TIME,
+        currentTimeMillis);
+    entity.addInfo(entityInfos);
+
+    // add an event
+    TimelineEvent startEvent = new TimelineEvent();
+    startEvent.setId(SliderTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString());
+    startEvent.setTimestamp(currentTimeMillis);
+    entity.addEvent(startEvent);
+
+    // publish before configurations published
+    putEntity(entity);
+
+    // publish application specific configurations
+    publishConfigurations(application.getConfiguration(), application.getId(),
+        SliderTimelineEntityType.SERVICE_ATTEMPT.toString(), true);
+
+    // publish component as separate entity.
+    publishComponents(application.getComponents());
+  }
+
+  public void serviceAttemptUnregistered(AppState appState,
+      ActionStopSlider stopAction) {
+    long currentTimeMillis = System.currentTimeMillis();
+
+    TimelineEntity entity =
+        createServiceAttemptEntity(appState.getClusterStatus().getId());
+
+    // add info
+    Map<String, Object> entityInfos = new HashMap<String, Object>();
+    entityInfos.put(SliderTimelineMetricsConstants.EXIT_STATUS_CODE,
+        stopAction.getExitCode());
+    entityInfos.put(SliderTimelineMetricsConstants.STATE,
+        stopAction.getFinalApplicationStatus().toString());
+    if (stopAction.getMessage() != null) {
+      entityInfos.put(SliderTimelineMetricsConstants.EXIT_REASON,
+          stopAction.getMessage());
+    }
+    if (stopAction.getEx() != null) {
+      entityInfos.put(SliderTimelineMetricsConstants.DIAGNOSTICS_INFO,
+          stopAction.getEx().toString());
+    }
+    entity.addInfo(entityInfos);
+
+    // add an event
+    TimelineEvent startEvent = new TimelineEvent();
+    startEvent
+        .setId(SliderTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString());
+    startEvent.setTimestamp(currentTimeMillis);
+    entity.addEvent(startEvent);
+
+    putEntity(entity);
+  }
+
+  public void componentInstanceStarted(Container container,
+      String componentName) {
+
+    TimelineEntity entity = createComponentInstanceEntity(container.getId());
+    entity.setCreatedTime(container.getLaunchTime().getTime());
+
+    // create info keys
+    Map<String, Object> entityInfos = new HashMap<String, Object>();
+    entityInfos.put(SliderTimelineMetricsConstants.BARE_HOST,
+        container.getBareHost());
+    entityInfos.put(SliderTimelineMetricsConstants.STATE,
+        container.getState().toString());
+    entityInfos.put(SliderTimelineMetricsConstants.LAUNCH_TIME,
+        container.getLaunchTime().getTime());
+    entityInfos.put(SliderTimelineMetricsConstants.COMPONENT_NAME,
+        componentName);
+    entity.addInfo(entityInfos);
+
+    // add an event
+    TimelineEvent startEvent = new TimelineEvent();
+    startEvent
+        .setId(SliderTimelineEvent.COMPONENT_INSTANCE_REGISTERED.toString());
+    startEvent.setTimestamp(container.getLaunchTime().getTime());
+    entity.addEvent(startEvent);
+
+    putEntity(entity);
+  }
+
+  public void componentInstanceFinished(RoleInstance instance) {
+    TimelineEntity entity = createComponentInstanceEntity(instance.id);
+
+    // create info keys
+    Map<String, Object> entityInfos = new HashMap<String, Object>();
+    entityInfos.put(SliderTimelineMetricsConstants.EXIT_STATUS_CODE,
+        instance.exitCode);
+    entityInfos.put(SliderTimelineMetricsConstants.DIAGNOSTICS_INFO,
+        instance.diagnostics);
+    // TODO need to change the state based on enum value.
+    entityInfos.put(SliderTimelineMetricsConstants.STATE, "FINISHED");
+    entity.addInfo(entityInfos);
+
+    // add an event
+    TimelineEvent startEvent = new TimelineEvent();
+    startEvent
+        .setId(SliderTimelineEvent.COMPONENT_INSTANCE_UNREGISTERED.toString());
+    startEvent.setTimestamp(System.currentTimeMillis());
+    entity.addEvent(startEvent);
+
+    putEntity(entity);
+  }
+
+  public void componentInstanceUpdated(Container container,
+      String componentName) {
+    TimelineEntity entity = createComponentInstanceEntity(container.getId());
+
+    // create info keys
+    Map<String, Object> entityInfos = new HashMap<String, Object>();
+    entityInfos.put(SliderTimelineMetricsConstants.IP, container.getIp());
+    entityInfos.put(SliderTimelineMetricsConstants.HOSTNAME,
+        container.getHostname());
+    entityInfos.put(SliderTimelineMetricsConstants.STATE,
+        container.getState().toString());
+    entity.addInfo(entityInfos);
+
+    TimelineEvent updateEvent = new TimelineEvent();
+    updateEvent
+        .setId(SliderTimelineEvent.COMPONENT_INSTANCE_UPDATED.toString());
+    updateEvent.setTimestamp(System.currentTimeMillis());
+    entity.addEvent(updateEvent);
+
+    putEntity(entity);
+  }
+
+  private void publishComponents(List<Component> components) {
+    long currentTimeMillis = System.currentTimeMillis();
+    for (Component component : components) {
+      TimelineEntity entity = createComponentEntity(component.getName());
+      entity.setCreatedTime(currentTimeMillis);
+
+      // create info keys
+      Map<String, Object> entityInfos = new HashMap<String, Object>();
+      entityInfos.put(SliderTimelineMetricsConstants.ARTIFACT_ID,
+          component.getArtifact().getId());
+      entityInfos.put(SliderTimelineMetricsConstants.ARTIFACT_TYPE,
+          component.getArtifact().getType().toString());
+      if (component.getResource().getProfile() != null) {
+        entityInfos.put(SliderTimelineMetricsConstants.RESOURCE_PROFILE,
+            component.getResource().getProfile());
+      }
+      entityInfos.put(SliderTimelineMetricsConstants.RESOURCE_CPU,
+          component.getResource().getCpus());
+      entityInfos.put(SliderTimelineMetricsConstants.RESOURCE_MEMORY,
+          component.getResource().getMemory());
+
+      if (component.getLaunchCommand() != null) {
+        entityInfos.put(SliderTimelineMetricsConstants.LAUNCH_COMMAND,
+            component.getLaunchCommand());
+      }
+      entityInfos.put(SliderTimelineMetricsConstants.UNIQUE_COMPONENT_SUPPORT,
+          component.getUniqueComponentSupport().toString());
+      entityInfos.put(SliderTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER,
+          component.getRunPrivilegedContainer().toString());
+      if (component.getPlacementPolicy() != null) {
+        entityInfos.put(SliderTimelineMetricsConstants.PLACEMENT_POLICY,
+            component.getPlacementPolicy().getLabel());
+      }
+      entity.addInfo(entityInfos);
+
+      putEntity(entity);
+
+      // publish component specific configurations
+      publishConfigurations(component.getConfiguration(), component.getName(),
+          SliderTimelineEntityType.COMPONENT.toString(), false);
+    }
+  }
+
+  private void publishConfigurations(Configuration configuration,
+      String entityId, String entityType, boolean isServiceAttemptEntity) {
+    if (isServiceAttemptEntity) {
+      // publish slider-client.xml properties at service level
+      publishConfigurations(SliderUtils.loadSliderClientXML().iterator(),
+          entityId, entityType);
+    }
+    publishConfigurations(configuration.getProperties().entrySet().iterator(),
+        entityId, entityType);
+
+    publishConfigurations(configuration.getEnv().entrySet().iterator(),
+        entityId, entityType);
+
+    for (ConfigFile configFile : configuration.getFiles()) {
+      publishConfigurations(configFile.getProps().entrySet().iterator(),
+          entityId, entityType);
+    }
+  }
+
+  private void publishConfigurations(Iterator<Entry<String, String>> iterator,
+      String entityId, String entityType) {
+    int configSize = 0;
+    TimelineEntity entity = createTimelineEntity(entityId, entityType);
+    while (iterator.hasNext()) {
+      Entry<String, String> entry = iterator.next();
+      int size = entry.getKey().length() + entry.getValue().length();
+      configSize += size;
+      // Configs are split into multiple entities if they exceed 10kb in size
+      // (ATS_CONFIG_PUBLISH_SIZE_BYTES).
+      if (configSize > ATS_CONFIG_PUBLISH_SIZE_BYTES) {
+        if (entity.getConfigs().size() > 0) {
+          putEntity(entity);
+          entity = createTimelineEntity(entityId, entityType);
+        }
+        configSize = size;
+      }
+      entity.addConfig(entry.getKey(), entry.getValue());
+    }
+    if (configSize > 0) {
+      putEntity(entity);
+    }
+  }
+
+  /**
+   * Called from SliderMetricsSink at regular interval of time.
+   * @param metrics of service or components
+   * @param entityId Id of entity
+   * @param entityType Type of entity
+   * @param timestamp the time at which the metrics were collected
+   */
+  public void publishMetrics(Iterable<AbstractMetric> metrics, String entityId,
+      String entityType, long timestamp) {
+    TimelineEntity entity = createTimelineEntity(entityId, entityType);
+    Set<TimelineMetric> entityMetrics = new HashSet<TimelineMetric>();
+    for (AbstractMetric metric : metrics) {
+      TimelineMetric timelineMetric = new TimelineMetric();
+      timelineMetric.setId(metric.name());
+      timelineMetric.addValue(timestamp, metric.value());
+      entityMetrics.add(timelineMetric);
+    }
+    entity.setMetrics(entityMetrics);
+    putEntity(entity);
+  }
+
+  private TimelineEntity createServiceAttemptEntity(String serviceId) {
+    TimelineEntity entity = createTimelineEntity(serviceId,
+        SliderTimelineEntityType.SERVICE_ATTEMPT.toString());
+    return entity;
+  }
+
+  private TimelineEntity createComponentInstanceEntity(String instanceId) {
+    TimelineEntity entity = createTimelineEntity(instanceId,
+        SliderTimelineEntityType.COMPONENT_INSTANCE.toString());
+    return entity;
+  }
+
+  private TimelineEntity createComponentEntity(String componentId) {
+    TimelineEntity entity = createTimelineEntity(componentId,
+        SliderTimelineEntityType.COMPONENT.toString());
+    return entity;
+  }
+
+  private TimelineEntity createTimelineEntity(String entityId,
+      String entityType) {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(entityId);
+    entity.setType(entityType);
+    return entity;
+  }
+
+  private void putEntity(TimelineEntity entity) {
+    try {
+      if (log.isDebugEnabled()) {
+        log.debug("Publishing the entity " + entity + ", JSON-style content: "
+            + TimelineUtils.dumpTimelineRecordtoJSON(entity));
+      }
+      if (timelineClient != null) {
+        timelineClient.putEntitiesAsync(entity);
+      } else {
+        log.error("Seems like client has been removed before the entity "
+            + "could be published for " + entity);
+      }
+    } catch (Exception e) {
+      log.error("Error when publishing entity " + entity, e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderMetricsSink.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderMetricsSink.java
new file mode 100644
index 0000000..869ae26
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderMetricsSink.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
+import org.apache.commons.configuration2.SubsetConfiguration;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Write the metrics to ATSv2. Generally, this class is instantiated via
+ * hadoop-metrics2 property files. Specifically, you would create this class by
+ * adding the following to the hadoop-metrics2 properties file: <code>
+ * [prefix].sink.[some instance name].class
+ * =org.apache.slider.server.appmaster.timelineservice.SliderMetricsSink
+ * </code>, where <tt>prefix</tt> is "atsv2" and <tt>some instance name</tt> is
+ * just any unique name, so properties can be differentiated if there are
+ * multiple sinks of the same type created
+ */
+public class SliderMetricsSink implements MetricsSink {
+
+  private static final Logger log =
+      LoggerFactory.getLogger(SliderMetricsSink.class);
+
+  private ServiceTimelinePublisher serviceTimelinePublisher;
+
+  public SliderMetricsSink() {
+
+  }
+
+  public SliderMetricsSink(ServiceTimelinePublisher publisher) {
+    serviceTimelinePublisher = publisher;
+  }
+
+  /**
+   * Publishes service and component metrics to ATS.
+   */
+  @Override
+  public void putMetrics(MetricsRecord record) {
+    if (serviceTimelinePublisher.isStopped()) {
+      log.warn("ServiceTimelinePublisher has stopped. "
+          + "Not publishing any more metrics to ATS.");
+      return;
+    }
+
+    boolean isServiceMetrics = false;
+    boolean isComponentMetrics = false;
+    String appId = null;
+    for (MetricsTag tag : record.tags()) {
+      if (tag.name().equals("type") && tag.value().equals("service")) {
+        isServiceMetrics = true;
+      } else if (tag.name().equals("type") && tag.value().equals("component")) {
+        isComponentMetrics = true;
+        break; // if component metrics, no more information required from tag so
+               // break the loop
+      } else if (tag.name().equals("appId")) {
+        appId = tag.value();
+      }
+    }
+
+    if (isServiceMetrics && appId != null) {
+      if (log.isDebugEnabled()) {
+        log.debug("Publishing service metrics. " + record);
+      }
+      serviceTimelinePublisher.publishMetrics(record.metrics(), appId,
+          SliderTimelineEntityType.SERVICE_ATTEMPT.toString(),
+          record.timestamp());
+    } else if (isComponentMetrics) {
+      if (log.isDebugEnabled()) {
+        log.debug("Publishing Component metrics. " + record);
+      }
+      serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(),
+          SliderTimelineEntityType.COMPONENT.toString(), record.timestamp());
+    }
+  }
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+  }
+
+  @Override
+  public void flush() {
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEntityType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEntityType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEntityType.java
new file mode 100644
index 0000000..908754f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEntityType.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
+/**
+ * Slider entities that are published to ATS.
+ */
+public enum SliderTimelineEntityType {
+  /**
+   * Used for publishing service entity information.
+   */
+  SERVICE_ATTEMPT,
+
+  /**
+   * Used for publishing component entity information.
+   */
+  COMPONENT,
+
+  /**
+   * Used for publishing component instance entity information.
+   */
+  COMPONENT_INSTANCE
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEvent.java
new file mode 100644
index 0000000..04f0219
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineEvent.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
/**
 * Event identifiers recorded against Slider entities in ATS.
 */
public enum SliderTimelineEvent {

  /** A service attempt was registered. */
  SERVICE_ATTEMPT_REGISTERED,

  /** A service attempt was unregistered. */
  SERVICE_ATTEMPT_UNREGISTERED,

  /** A component instance was registered. */
  COMPONENT_INSTANCE_REGISTERED,

  /** A component instance was unregistered. */
  COMPONENT_INSTANCE_UNREGISTERED,

  /** A component instance's published details were updated. */
  COMPONENT_INSTANCE_UPDATED
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
new file mode 100644
index 0000000..23e059d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/SliderTimelineMetricsConstants.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
/**
 * Constants that are used as keys for the info/metrics Slider stores in ATS.
 */
public final class SliderTimelineMetricsConstants {

  public static final String URI = "URI";

  public static final String NAME = "NAME";

  public static final String STATE = "STATE";

  public static final String EXIT_STATUS_CODE = "EXIT_STATUS_CODE";

  public static final String EXIT_REASON = "EXIT_REASON";

  public static final String DIAGNOSTICS_INFO = "DIAGNOSTICS_INFO";

  public static final String LAUNCH_TIME = "LAUNCH_TIME";

  public static final String LAUNCH_COMMAND = "LAUNCH_COMMAND";

  public static final String TOTAL_CONTAINERS = "NUMBER_OF_CONTAINERS";

  public static final String RUNNING_CONTAINERS =
      "NUMBER_OF_RUNNING_CONTAINERS";

  /**
   * Artifacts constants.
   */
  public static final String ARTIFACT_ID = "ARTIFACT_ID";

  public static final String ARTIFACT_TYPE = "ARTIFACT_TYPE";

  public static final String ARTIFACT_URI = "ARTIFACT_URI";

  /**
   * Resource constants.
   */
  public static final String RESOURCE_CPU = "RESOURCE_CPU";

  public static final String RESOURCE_MEMORY = "RESOURCE_MEMORY";

  public static final String RESOURCE_PROFILE = "RESOURCE_PROFILE";

  /**
   * Component instance constants.
   */
  public static final String IP = "IP";

  public static final String HOSTNAME = "HOSTNAME";

  public static final String BARE_HOST = "BARE_HOST";

  public static final String COMPONENT_NAME = "COMPONENT_NAME";

  /**
   * Component constants.
   */
  public static final String DEPENDENCIES = "DEPENDENCIES";

  public static final String DESCRIPTION = "DESCRIPTION";

  public static final String UNIQUE_COMPONENT_SUPPORT =
      "UNIQUE_COMPONENT_SUPPORT";

  public static final String RUN_PRIVILEGED_CONTAINER =
      "RUN_PRIVILEGED_CONTAINER";

  public static final String PLACEMENT_POLICY = "PLACEMENT_POLICY";

  // Constants holder; private constructor prevents instantiation.
  private SliderTimelineMetricsConstants() {
  }
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
new file mode 100644
index 0000000..0bffc90
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * ATS implementation
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.slider.server.appmaster.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
new file mode 100644
index 0000000..1209aef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/TestServiceTimelinePublisher.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.timelineservice;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.ApplicationState;
+import org.apache.slider.api.resource.Artifact;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.Container;
+import org.apache.slider.api.resource.ContainerState;
+import org.apache.slider.api.resource.PlacementPolicy;
+import org.apache.slider.api.resource.Resource;
+import org.apache.slider.server.appmaster.actions.ActionStopSlider;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for ServiceTimelinePublisher.
+ */
+public class TestServiceTimelinePublisher {
+  private TimelineClient timelineClient;
+  private Configuration config;
+  private ServiceTimelinePublisher serviceTimelinePublisher;
+  private static String SERVICE_NAME = "HBASE";
+  private static String SERVICEID = "application_1490093646524_0005";
+  private static String ARTIFACTID = "ARTIFACTID";
+  private static String COMPONENT_NAME = "DEFAULT";
+  private static String CONTAINER_ID =
+      "container_e02_1490093646524_0005_01_000001";
+  private static String CONTAINER_IP =
+      "localhost";
+  private static String CONTAINER_HOSTNAME =
+      "cnl124-localhost.site";
+  private static String CONTAINER_BAREHOST =
+      "localhost.com";
+
+  @Before
+  public void setUp() throws Exception {
+    config = new Configuration();
+    timelineClient = new DummyTimelineClient();
+    serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient);
+    timelineClient.init(config);
+    serviceTimelinePublisher.init(config);
+    timelineClient.start();
+    serviceTimelinePublisher.start();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    serviceTimelinePublisher.stop();
+    timelineClient.stop();
+  }
+
+  @Test
+  public void testServiceAttemptEntity() {
+    AppState appState = createMockAppState();
+    int exitCode = 0;
+    String message = "Stopped by user";
+    ActionStopSlider stopAction = mock(ActionStopSlider.class);
+    when(stopAction.getExitCode()).thenReturn(exitCode);
+    when(stopAction.getFinalApplicationStatus())
+        .thenReturn(FinalApplicationStatus.SUCCEEDED);
+    when(stopAction.getMessage()).thenReturn(message);
+
+    serviceTimelinePublisher.serviceAttemptRegistered(appState);
+
+    Collection<TimelineEntity> lastPublishedEntities =
+        ((DummyTimelineClient) timelineClient).getLastPublishedEntities();
+    // 2 entities because during registration component also registered.
+    assertEquals(2, lastPublishedEntities.size());
+    for (TimelineEntity timelineEntity : lastPublishedEntities) {
+      if (timelineEntity.getType() == SliderTimelineEntityType.COMPONENT
+          .toString()) {
+        verifyComponentTimelineEntity(timelineEntity);
+      } else {
+        verifyServiceAttemptTimelineEntity(timelineEntity, 0, null, true);
+      }
+    }
+
+    serviceTimelinePublisher.serviceAttemptUnregistered(appState, stopAction);
+    lastPublishedEntities =
+        ((DummyTimelineClient) timelineClient).getLastPublishedEntities();
+    for (TimelineEntity timelineEntity : lastPublishedEntities) {
+      if (timelineEntity.getType() == SliderTimelineEntityType.SERVICE_ATTEMPT
+          .toString()) {
+        verifyServiceAttemptTimelineEntity(timelineEntity, exitCode, message,
+            false);
+      }
+    }
+  }
+
+  @Test
+  public void testComponentInstanceEntity() {
+    Container container = new Container();
+    container.id(CONTAINER_ID).ip(CONTAINER_IP).bareHost(CONTAINER_BAREHOST)
+        .hostname(CONTAINER_HOSTNAME).state(ContainerState.INIT)
+        .launchTime(new Date());
+    serviceTimelinePublisher.componentInstanceStarted(container,
+        COMPONENT_NAME);
+
+    Collection<TimelineEntity> lastPublishedEntities =
+        ((DummyTimelineClient) timelineClient).getLastPublishedEntities();
+    assertEquals(1, lastPublishedEntities.size());
+    TimelineEntity entity = lastPublishedEntities.iterator().next();
+
+    assertEquals(1, entity.getEvents().size());
+    assertEquals(CONTAINER_ID, entity.getId());
+    assertEquals(CONTAINER_BAREHOST,
+        entity.getInfo().get(SliderTimelineMetricsConstants.BARE_HOST));
+    assertEquals(COMPONENT_NAME,
+        entity.getInfo().get(SliderTimelineMetricsConstants.COMPONENT_NAME));
+    assertEquals(ContainerState.INIT.toString(),
+        entity.getInfo().get(SliderTimelineMetricsConstants.STATE));
+
+    // updated container state
+    container.setState(ContainerState.READY);
+    serviceTimelinePublisher.componentInstanceUpdated(container,
+        COMPONENT_NAME);
+    lastPublishedEntities =
+        ((DummyTimelineClient) timelineClient).getLastPublishedEntities();
+    assertEquals(1, lastPublishedEntities.size());
+    entity = lastPublishedEntities.iterator().next();
+    assertEquals(2, entity.getEvents().size());
+    assertEquals(ContainerState.READY.toString(),
+        entity.getInfo().get(SliderTimelineMetricsConstants.STATE));
+
+  }
+
+  private void verifyServiceAttemptTimelineEntity(TimelineEntity timelineEntity,
+      int exitCode, String message, boolean isRegistedEntity) {
+    assertEquals(SERVICEID, timelineEntity.getId());
+    assertEquals(SERVICE_NAME,
+        timelineEntity.getInfo().get(SliderTimelineMetricsConstants.NAME));
+    if (isRegistedEntity) {
+      assertEquals(ApplicationState.STARTED.toString(),
+          timelineEntity.getInfo().get(SliderTimelineMetricsConstants.STATE));
+      assertEquals(SliderTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString(),
+          timelineEntity.getEvents().iterator().next().getId());
+    } else {
+      assertEquals("SUCCEEDED",
+          timelineEntity.getInfo().get(SliderTimelineMetricsConstants.STATE));
+      assertEquals(exitCode, timelineEntity.getInfo()
+          .get(SliderTimelineMetricsConstants.EXIT_STATUS_CODE));
+      assertEquals(message, timelineEntity.getInfo()
+          .get(SliderTimelineMetricsConstants.EXIT_REASON));
+
+      assertEquals(2, timelineEntity.getEvents().size());
+      assertEquals(SliderTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString(),
+          timelineEntity.getEvents().iterator().next().getId());
+    }
+  }
+
+  private void verifyComponentTimelineEntity(TimelineEntity entity) {
+    Map<String, Object> info = entity.getInfo();
+    assertEquals("DEFAULT", entity.getId());
+    assertEquals(ARTIFACTID,
+        info.get(SliderTimelineMetricsConstants.ARTIFACT_ID));
+    assertEquals("DOCKER",
+        info.get(SliderTimelineMetricsConstants.ARTIFACT_TYPE));
+    assertEquals("medium",
+        info.get(SliderTimelineMetricsConstants.RESOURCE_PROFILE));
+    assertEquals(1, info.get(SliderTimelineMetricsConstants.RESOURCE_CPU));
+    assertEquals("1024",
+        info.get(SliderTimelineMetricsConstants.RESOURCE_MEMORY));
+    assertEquals("sleep 1",
+        info.get(SliderTimelineMetricsConstants.LAUNCH_COMMAND));
+    assertEquals("false",
+        info.get(SliderTimelineMetricsConstants.UNIQUE_COMPONENT_SUPPORT));
+    assertEquals("false",
+        info.get(SliderTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER));
+    assertEquals("label",
+        info.get(SliderTimelineMetricsConstants.PLACEMENT_POLICY));
+  }
+
+  private static AppState createMockAppState() {
+    AppState appState = mock(AppState.class);
+    Application application = mock(Application.class);
+
+    when(application.getId()).thenReturn(SERVICEID);
+    when(application.getLaunchTime()).thenReturn(new Date());
+    when(application.getState()).thenReturn(ApplicationState.STARTED);
+    when(application.getName()).thenReturn(SERVICE_NAME);
+    when(application.getConfiguration())
+        .thenReturn(new org.apache.slider.api.resource.Configuration());
+
+    Component component = mock(Component.class);
+    Artifact artifact = new Artifact();
+    artifact.setId(ARTIFACTID);
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory(1024 + "");
+    resource.setProfile("medium");
+    when(component.getArtifact()).thenReturn(artifact);
+    when(component.getName()).thenReturn(COMPONENT_NAME);
+    when(component.getResource()).thenReturn(resource);
+    when(component.getLaunchCommand()).thenReturn("sleep 1");
+    PlacementPolicy placementPolicy = new PlacementPolicy();
+    placementPolicy.setLabel("label");
+    when(component.getPlacementPolicy()).thenReturn(placementPolicy);
+    when(component.getConfiguration())
+        .thenReturn(new org.apache.slider.api.resource.Configuration());
+    List<Component> components = new ArrayList<Component>();
+    components.add(component);
+
+    when(application.getComponents()).thenReturn(components);
+    when(appState.getClusterStatus()).thenReturn(application);
+    return appState;
+  }
+
+  public static void main(String[] args) {
+    Application application = createMockAppState().getClusterStatus();
+    System.out.println(application.getConfiguration());
+  }
+
+  protected static class DummyTimelineClient extends TimelineClientImpl {
+    private Map<Identifier, TimelineEntity> lastPublishedEntities =
+        new HashMap<>();
+
+    @Override
+    public void putEntitiesAsync(TimelineEntity... entities)
+        throws IOException, YarnException {
+      for (TimelineEntity timelineEntity : entities) {
+        TimelineEntity entity =
+            lastPublishedEntities.get(timelineEntity.getIdentifier());
+        if (entity == null) {
+          lastPublishedEntities.put(timelineEntity.getIdentifier(),
+              timelineEntity);
+        } else {
+          entity.addMetrics(timelineEntity.getMetrics());
+          entity.addEvents(timelineEntity.getEvents());
+          entity.addInfo(timelineEntity.getInfo());
+          entity.addConfigs(timelineEntity.getConfigs());
+          entity.addRelatesToEntities(timelineEntity.getRelatesToEntities());
+          entity
+              .addIsRelatedToEntities(timelineEntity.getIsRelatedToEntities());
+        }
+      }
+    }
+
+    public Collection<TimelineEntity> getLastPublishedEntities() {
+      return lastPublishedEntities.values();
+    }
+
+    public void reset() {
+      lastPublishedEntities = null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f9ea483/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
new file mode 100644
index 0000000..f274cd0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/timelineservice/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * ATS tests
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.slider.server.appmaster.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: YARN-6419. Support to launch new native-service from new YARN UI. Contributed by Akhil PB.

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
index 04788be..274217a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs
@@ -74,6 +74,10 @@
             </div>
           </div>
         </div>
+
+        <div class="col-lg-4 container-fluid">
+          <a class="btn btn-primary pull-right" href="#/yarn-deploy-service">New Service</a>
+        </div>
       </div>
       {{#if model.apps}}
         {{em-table columns=serviceColumns rows=model.apps definition=tableDefinition}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
new file mode 100644
index 0000000..d63b3c5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
// Help text keyed by service-spec field name. Presumably rendered as info
// tooltips by the new-service deployment UI — confirm against the consuming
// component before relying on this.
export default {
  serviceName: "A unique application name",
  queueName: "The YARN queue that this application should be submitted to",
  lifetime: "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.",
  components: "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.",
  configurations: "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.",
  fileConfigs: "Set of file configurations that needs to be created and made available as a volume in an application component container."
};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env
index 04577c9..a795fc5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env
@@ -40,6 +40,13 @@ ENV = {
      */
       //rmWebAddress: "localhost:8088",
 
+      /*
+       * Dash server web interface can be configured below.
+       * By default dash web address is set as localhost:9191, uncomment and change
+       * the following value for pointing to a different address.
+       */
+      //dashWebAddress: "localhost:9191",
+
     /*
      * Protocol scheme. It can be "http:" or "https:". By default, http is used.
      */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
index 8ab7ce1..5785d1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
@@ -21,13 +21,16 @@ module.exports = { // Yarn UI App configurations
       localBaseAddress: "",
       timelineWebAddress: "localhost:8188",
       rmWebAddress: "localhost:8088",
+      dashWebAddress: "localhost:9191",
       protocolScheme: "http:"
     },
     namespaces: {
       timeline: 'ws/v1/applicationhistory',
       cluster: 'ws/v1/cluster',
       metrics: 'ws/v1/cluster/metrics',
-      node: '{nodeAddress}/ws/v1/node',
-      timelineV2: 'ws/v2/timeline'
+      timelineService: 'ws/v2/timeline/apps',
+      timelineV2: 'ws/v2/timeline',
+      dashService: 'services/v1/applications',
+      node: '{nodeAddress}/ws/v1/node'
     },
 };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js
new file mode 100644
index 0000000..ba855a7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
moduleForComponent('deploy-service', 'Integration | Component | deploy service', {
  integration: true
});

// Generated ember-qunit scaffolding: renders the component inline and in
// block form and checks the rendered text.
test('it renders', function(assert) {

  // Set any properties with this.set('myProperty', 'value');
  // Handle any actions with this.on('myAction', function(val) { ... });

  this.render(hbs`{{deploy-service}}`);

  assert.equal(this.$().text().trim(), '');

  // Template block usage:
  this.render(hbs`
    {{#deploy-service}}
      template block text
    {{/deploy-service}}
  `);

  assert.equal(this.$().text().trim(), 'template block text');
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js
new file mode 100644
index 0000000..f99e08f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('fileconfig-viewer-dialog', 'Integration | Component | fileconfig viewer dialog', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{fileconfig-viewer-dialog}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#fileconfig-viewer-dialog}}
+      template block text
+    {{/fileconfig-viewer-dialog}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js
new file mode 100644
index 0000000..7b0c1a1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('info-tooltip', 'Integration | Component | info tooltip', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{info-tooltip}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#info-tooltip}}
+      template block text
+    {{/info-tooltip}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js
new file mode 100644
index 0000000..3ea27a5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('service-component-table', 'Integration | Component | service component table', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{service-component-table}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#service-component-table}}
+      template block text
+    {{/service-component-table}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js
new file mode 100644
index 0000000..39f269a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('service-config-table', 'Integration | Component | service config table', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{service-config-table}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#service-config-table}}
+      template block text
+    {{/service-config-table}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js
new file mode 100644
index 0000000..a486fa0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('service-fileconfig-table', 'Integration | Component | service fileconfig table', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{service-fileconfig-table}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#service-fileconfig-table}}
+      template block text
+    {{/service-fileconfig-table}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js
new file mode 100644
index 0000000..64fdf4a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('upload-config', 'Integration | Component | upload config', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{upload-config}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#upload-config}}
+      template block text
+    {{/upload-config}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js
new file mode 100644
index 0000000..ea12bc5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('adapter:yarn-servicedef', 'Unit | Adapter | yarn servicedef', {
+  // Specify the other units that are required for this test.
+  // needs: ['serializer:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let adapter = this.subject();
+  assert.ok(adapter);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js
new file mode 100644
index 0000000..c3918f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-deploy-service', 'Unit | Controller | yarn deploy service', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js
new file mode 100644
index 0000000..141a94b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-servicedef', 'Unit | Model | yarn servicedef', {
+  // Specify the other units that are required for this test.
+  needs: []
+});
+
+test('it exists', function(assert) {
+  let model = this.subject();
+  // let store = this.store();
+  assert.ok(!!model);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js
new file mode 100644
index 0000000..4e2dcf1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-deploy-service', 'Unit | Route | yarn deploy service', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[47/50] [abbrv] hadoop git commit: YARN-6613. Update json validation for new native services providers. Contributed by Billie Rinaldi

Posted by ji...@apache.org.
YARN-6613. Update json validation for new native services providers. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e57dddc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e57dddc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e57dddc9

Branch: refs/heads/yarn-native-services
Commit: e57dddc9eb7a6ccc47877548b16ec8ac8d6c281a
Parents: 62ceedf
Author: Jian He <ji...@apache.org>
Authored: Thu May 25 12:47:19 2017 -0700
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:26 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-services-api/pom.xml            |  57 +--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  12 +-
 .../api/impl/TestApplicationApiService.java     | 209 ----------
 .../apache/slider/api/resource/Application.java |   4 +
 .../apache/slider/api/resource/Component.java   |  41 +-
 .../slider/api/resource/Configuration.java      |  10 +-
 .../org/apache/slider/client/SliderClient.java  | 109 ++---
 .../slider/client/SliderYarnClientImpl.java     |  61 +++
 .../org/apache/slider/common/SliderKeys.java    | 128 +-----
 .../slider/common/tools/CoreFileSystem.java     |  64 ---
 .../apache/slider/common/tools/SliderUtils.java |  14 +-
 .../slider/core/persist/InstancePaths.java      |  58 ---
 .../providers/AbstractClientProvider.java       |  51 ++-
 .../slider/providers/SliderProviderFactory.java |  12 +-
 .../tarball/TarballProviderService.java         |   2 +-
 .../server/appmaster/SliderAppMaster.java       |  18 +-
 .../slider/util/RestApiErrorMessages.java       |   4 +-
 .../org/apache/slider/util/ServiceApiUtil.java  | 273 +++++++------
 .../slider/client/TestKeytabCommandOptions.java |  11 +-
 .../common/tools/TestMiscSliderUtils.java       |  49 ---
 .../apache/slider/core/conf/ExampleAppJson.java |  64 +++
 .../slider/core/conf/ExampleConfResources.java  |  58 ---
 .../core/conf/TestConfTreeLoadExamples.java     |  64 ---
 .../core/conf/TestConfigurationResolve.java     | 146 ++++++-
 .../slider/core/conf/TestExampleAppJson.java    |  79 ++++
 .../providers/TestAbstractClientProvider.java   | 121 ++++++
 .../TestBuildApplicationComponent.java          |  96 +++++
 .../slider/providers/TestDefaultProvider.java   |  60 +++
 .../model/appstate/BaseMockAppStateAATest.java  |   2 +-
 .../appstate/TestMockAppStateAAPlacement.java   |   2 +-
 .../TestMockAppStateContainerFailure.java       |   2 +-
 .../TestMockAppStateFlexDynamicRoles.java       |   5 +-
 .../TestMockAppStateRebuildOnAMRestart.java     |   2 +-
 .../appstate/TestMockAppStateUniqueNames.java   |   3 +-
 .../TestMockContainerResourceAllocations.java   |   2 +-
 .../model/mock/BaseMockAppStateTest.java        |  14 +-
 .../appmaster/model/mock/MockFactory.java       |   3 +
 .../apache/slider/utils/TestServiceApiUtil.java | 393 +++++++++++++++++++
 .../slider/utils/YarnMiniClusterTestBase.java   |  99 +++--
 .../slider/utils/YarnZKMiniClusterTestBase.java |   5 +-
 .../conf/examples/app-override-resolved.json    |  49 ---
 .../slider/core/conf/examples/app-override.json |  33 +-
 .../slider/core/conf/examples/app-resolved.json |  81 ----
 .../apache/slider/core/conf/examples/app.json   |  13 +-
 .../slider/core/conf/examples/default.json      |  16 +
 .../slider/core/conf/examples/external0.json    |   8 +
 .../slider/core/conf/examples/external1.json    |  30 ++
 .../slider/core/conf/examples/external2.json    |  22 ++
 48 files changed, 1539 insertions(+), 1120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index 4e88aef..bc714db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -28,11 +28,6 @@
   <packaging>jar</packaging>
   <description>Hadoop YARN REST APIs for services</description>
 
-  <properties>
-    <test.failIfNoTests>false</test.failIfNoTests>
-    <powermock.version>1.6.5</powermock.version>
-  </properties>
-
   <build>
 
     <!-- resources are filtered for dynamic updates. This gets build info in-->
@@ -81,30 +76,10 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>${maven-surefire-plugin.version}</version>
         <configuration>
-          <reuseForks>${test.reuseForks}</reuseForks>
-          <forkMode>${test.forkMode}</forkMode>
-          <forkCount>1</forkCount>
-          <forkedProcessTimeoutInSeconds>${test.forkedProcessTimeoutInSeconds}
-          </forkedProcessTimeoutInSeconds>
-          <threadCount>1</threadCount>
-          <argLine>${test.argLine}</argLine>
-          <failIfNoTests>${test.failIfNoTests}</failIfNoTests>
-          <redirectTestOutputToFile>${build.redirect.test.output.to.file}</redirectTestOutputToFile>
           <environmentVariables>
-            <PATH>${test.env.path}</PATH>
+            <JAVA_HOME>${java.home}</JAVA_HOME>
           </environmentVariables>
-          <systemPropertyVariables>
-            <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
-            <java.awt.headless>true</java.awt.headless>
-          </systemPropertyVariables>
-          <includes>
-            <include>**/Test*.java</include>
-          </includes>
-          <excludes>
-            <exclude>**/Test*$*.java</exclude>
-          </excludes>
         </configuration>
       </plugin>
 
@@ -121,13 +96,6 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
       <groupId>io.swagger</groupId>
       <artifactId>swagger-annotations</artifactId>
     </dependency>
@@ -147,29 +115,6 @@
       <groupId>com.fasterxml.jackson.jaxrs</groupId>
       <artifactId>jackson-jaxrs-json-provider</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-module-junit4</artifactId>
-      <version>${powermock.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.powermock</groupId>
-      <artifactId>powermock-api-easymock</artifactId>
-      <version>${powermock.version}</version>
-      <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.easymock</groupId>
-          <artifactId>easymock</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
   </dependencies>
 
   <profiles>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 82cc30f..f8ed4d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -158,7 +158,7 @@ definitions:
         type: string
         description: A unique application id.
       artifact:
-        description: Artifact of single-component applications. Mandatory if components attribute is not specified.
+        description: Artifact of single-component applications.
         $ref: '#/definitions/Artifact'
       resource:
         description: Resource of single-component applications or the global default for multi-component applications. Mandatory if it is a single-component application and if cpus and memory are not specified at the Application level.
@@ -230,16 +230,16 @@ definitions:
         type: string
         description: Assigns an app to a named partition of the cluster where the application desires to run (optional). If not specified all apps are submitted to a default label of the app owner. One or more labels can be setup for each application owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.
   Artifact:
-    description: Artifact of an application component.
+    description: Artifact of an application component. If not specified, component will just run the bare launch command and no artifact will be localized.
     required:
     - id
     properties:
       id:
         type: string
-        description: Artifact id. Examples are package location uri for tarball based apps, image name for docker, etc.
+        description: Artifact id. Examples are package location uri for tarball based apps, image name for docker, name of application, etc.
       type:
         type: string
-        description: Artifact type, like docker, tarball, etc. (optional).
+        description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For APPLICATION type, the application specified will be read and its components will be added into this application. The original component with artifact type APPLICATION will be removed (any properties specified in the original component will be ignored).
         enum:
           - DOCKER
           - TARBALL
@@ -269,7 +269,7 @@ definitions:
         $ref: '#/definitions/Artifact'
       launch_command:
         type: string
-        description: The custom launch command of this component (optional). When specified at the component level, it overrides the value specified at the global level (if any).
+        description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any).
       resource:
         description: Resource of this component (optional). If not specified, the application level global resource takes effect.
         $ref: '#/definitions/Resource'
@@ -344,7 +344,7 @@ definitions:
           - HADOOP_XML
       dest_file:
         type: string
-        description: The absolute path that this configuration file should be mounted as, in the application container.
+        description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.
       src_file:
         type: string
         description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
deleted file mode 100644
index 6e077d2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.api.impl;
-
-import static org.apache.slider.util.RestApiConstants.*;
-import static org.apache.slider.util.RestApiErrorMessages.*;
-
-import java.util.ArrayList;
-
-import org.apache.slider.api.resource.Application;
-import org.apache.slider.api.resource.Artifact;
-import org.apache.slider.api.resource.Resource;
-import org.apache.slider.util.ServiceApiUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.powermock.core.classloader.annotations.SuppressStaticInitializationFor;
-import org.powermock.modules.junit4.PowerMockRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test class for application life time monitor feature test.
- */
-@RunWith(PowerMockRunner.class)
-@SuppressStaticInitializationFor("org.apache.hadoop.yarn.services.api.impl.ApplicationApiService")
-public class TestApplicationApiService {
-  private static final Logger logger = LoggerFactory
-      .getLogger(TestApplicationApiService.class);
-  private static String EXCEPTION_PREFIX = "Should have thrown exception: ";
-  private static String NO_EXCEPTION_PREFIX = "Should not have thrown exception: ";
-  private ApplicationApiService appApiService;
-
-  @Before
-  public void setup() throws Exception {
-     appApiService = new ApplicationApiService();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
-  @Test(timeout = 90000)
-  public void testValidateApplicationPostPayload() throws Exception {
-    Application app = new Application();
-
-    // no name
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX + "application with no name");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
-    }
-
-    // bad format name
-    String[] badNames = { "4finance", "Finance", "finance@home" };
-    for (String badName : badNames) {
-      app.setName(badName);
-      try {
-        ServiceApiUtil.validateApplicationPayload(app, null);
-        Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName);
-      } catch (IllegalArgumentException e) {
-        Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID_FORMAT,
-            e.getMessage());
-      }
-    }
-
-    // no artifact
-    app.setName("finance_home");
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX + "application with no artifact");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_ARTIFACT_INVALID, e.getMessage());
-    }
-
-    // no artifact id
-    Artifact artifact = new Artifact();
-    app.setArtifact(artifact);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX + "application with no artifact id");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
-    }
-
-    // if artifact is of type APPLICATION then everything is valid here
-    artifact.setType(Artifact.TypeEnum.APPLICATION);
-    artifact.setId("app.io/hbase:facebook_0.2");
-    app.setNumberOfContainers(5l);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-    } catch (IllegalArgumentException e) {
-      logger.error("application attributes specified should be valid here", e);
-      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
-    }
-
-    // default-component, default-lifetime and the property component_type
-    // should get assigned here
-    Assert.assertEquals(app.getComponents().get(0).getName(),
-        DEFAULT_COMPONENT_NAME);
-    Assert.assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME);
-    //TODO handle external app
-
-    // unset artifact type, default component and no of containers to test other
-    // validation logic
-    artifact.setType(null);
-    app.setComponents(new ArrayList<>());
-    app.setNumberOfContainers(null);
-
-    // resource not specified
-    artifact.setId("docker.io/centos:centos7");
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX + "application with no resource");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_INVALID, e.getMessage());
-    }
-
-    // memory not specified
-    Resource res = new Resource();
-    app.setResource(res);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX + "application with no memory");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_MEMORY_INVALID, e.getMessage());
-    }
-
-    // cpu does not need to be always specified, it's an optional feature in yarn
-    // invalid no of cpus
-    res.setMemory("100mb");
-    res.setCpus(-2);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(
-          EXCEPTION_PREFIX + "application with invalid no of cpups");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_CPUS_INVALID_RANGE, e.getMessage());
-    }
-
-    // number of containers not specified
-    res.setCpus(2);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(
-          EXCEPTION_PREFIX + "application with no container count");
-    } catch (IllegalArgumentException e) {
-      Assert.assertTrue(e.getMessage().contains(ERROR_CONTAINERS_COUNT_INVALID));
-    }
-
-    // specifying profile along with cpus/memory raises exception
-    res.setProfile("hbase_finance_large");
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX
-          + "application with resource profile along with cpus/memory");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED,
-          e.getMessage());
-    }
-
-    // currently resource profile alone is not supported.
-    // TODO: remove the next test once it is supported.
-    res.setCpus(null);
-    res.setMemory(null);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-      Assert.fail(EXCEPTION_PREFIX
-          + "application with resource profile only - NOT SUPPORTED");
-    } catch (IllegalArgumentException e) {
-      Assert.assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET,
-          e.getMessage());
-    }
-
-    // unset profile here and add cpus/memory back
-    res.setProfile(null);
-    res.setCpus(2);
-    res.setMemory("2gb");
-
-    // everything valid here
-    app.setNumberOfContainers(5l);
-    try {
-      ServiceApiUtil.validateApplicationPayload(app, null);
-    } catch (IllegalArgumentException e) {
-      logger.error("application attributes specified should be valid here", e);
-      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
-    }
-
-    // Now test with components
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
index 502b519..4b7e59b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Application.java
@@ -286,6 +286,10 @@ public class Application extends BaseResource {
     this.components = components;
   }
 
+  public void addComponent(Component component) {
+    components.add(component);
+  }
+
   public Component getComponent(String name) {
     for (Component component : components) {
       if (component.getName().equals(name)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
index e7f3796..229e288 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Component.java
@@ -50,9 +50,9 @@ public class Component implements Serializable {
   private String name = null;
   private List<String> dependencies = new ArrayList<String>();
   private ReadinessCheck readinessCheck = null;
-  private Artifact artifact = new Artifact();
+  private Artifact artifact = null;
   private String launchCommand = null;
-  private Resource resource = new Resource();
+  private Resource resource = null;
   private Long numberOfContainers = null;
   private Boolean uniqueComponentSupport = false;
   private Boolean runPrivilegedContainer = false;
@@ -406,4 +406,41 @@ public class Component implements Serializable {
     }
     return o.toString().replace("\n", "\n    ");
   }
+
+  /**
+   * Merge from another component into this component without overwriting.
+   */
+  public void mergeFrom(Component that) {
+    if (this.getArtifact() == null) {
+      this.setArtifact(that.getArtifact());
+    }
+    if (this.getResource() == null) {
+      this.setResource(that.getResource());
+    }
+    if (this.getNumberOfContainers() == null) {
+      this.setNumberOfContainers(that.getNumberOfContainers());
+    }
+    if (this.getLaunchCommand() == null) {
+      this.setLaunchCommand(that.getLaunchCommand());
+    }
+    this.getConfiguration().mergeFrom(that.getConfiguration());
+    if (this.getQuicklinks() == null) {
+      this.setQuicklinks(that.getQuicklinks());
+    }
+    if (this.getRunPrivilegedContainer() == null) {
+      this.setRunPrivilegedContainer(that.getRunPrivilegedContainer());
+    }
+    if (this.getUniqueComponentSupport() == null) {
+      this.setUniqueComponentSupport(that.getUniqueComponentSupport());
+    }
+    if (this.getDependencies() == null) {
+      this.setDependencies(that.getDependencies());
+    }
+    if (this.getPlacementPolicy() == null) {
+      this.setPlacementPolicy(that.getPlacementPolicy());
+    }
+    if (this.getReadinessCheck() == null) {
+      this.setReadinessCheck(that.getReadinessCheck());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
index 37d1a40..e89306c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java
@@ -22,6 +22,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import org.apache.commons.lang.StringUtils;
+import org.apache.slider.common.tools.SliderUtils;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -197,8 +198,10 @@ public class Configuration implements Serializable {
    * this ConfigFile.
    */
   public synchronized void mergeFrom(Configuration that) {
-    this.properties.putAll(that.getProperties());
-    this.env.putAll(that.getEnv());
+    SliderUtils.mergeMapsIgnoreDuplicateKeys(this.properties, that
+        .getProperties());
+    SliderUtils.mergeMapsIgnoreDuplicateKeys(this.env, that.getEnv());
+
     Map<String, ConfigFile> thatMap = new HashMap<>();
     for (ConfigFile file : that.getFiles()) {
       thatMap.put(file.getDestFile(), file.copy());
@@ -206,7 +209,8 @@ public class Configuration implements Serializable {
     for (ConfigFile thisFile : files) {
       if(thatMap.containsKey(thisFile.getDestFile())) {
         ConfigFile thatFile = thatMap.get(thisFile.getDestFile());
-        thisFile.getProps().putAll(thatFile.getProps());
+        SliderUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProps(),
+            thatFile.getProps());
         thatMap.remove(thisFile.getDestFile());
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 32d78b4..29ca471 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -19,7 +19,6 @@
 package org.apache.slider.client;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.io.Files;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -101,6 +100,7 @@ import org.apache.slider.common.params.Arguments;
 import org.apache.slider.common.params.ClientArgs;
 import org.apache.slider.common.params.CommonArgs;
 import org.apache.slider.common.tools.ConfigHelper;
+import org.apache.slider.common.tools.Duration;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
@@ -142,8 +142,6 @@ import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
 import org.codehaus.jackson.map.PropertyNamingStrategy;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -636,16 +634,17 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   public int actionBuild(Application application) throws YarnException,
       IOException {
     Path appDir = checkAppNotExistOnHdfs(application);
+    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
     persistApp(appDir, application);
+    deployedClusterName = application.getName();
     return EXIT_SUCCESS;
   }
 
   public ApplicationId actionCreate(Application application)
       throws IOException, YarnException {
-    ServiceApiUtil.validateApplicationPayload(application,
-        sliderFileSystem.getFileSystem());
     String appName = application.getName();
     validateClusterName(appName);
+    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
     verifyNoLiveApp(appName, "Create");
     Path appDir = checkAppNotExistOnHdfs(application);
 
@@ -880,6 +879,14 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return appDir;
   }
 
+  private Path checkAppExistOnHdfs(String appName)
+      throws IOException, SliderException {
+    Path appDir = sliderFileSystem.buildClusterDirPath(appName);
+    sliderFileSystem.verifyPathExists(
+        new Path(appDir, appName + ".json"));
+    return appDir;
+  }
+
   private void persistApp(Path appDir, Application application)
       throws IOException, SliderException {
     FsPermission appDirPermission = new FsPermission("750");
@@ -1125,7 +1132,9 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       YarnException,
       IOException {
     if (clientInfo.install) {
-      return doClientInstall(clientInfo);
+      // TODO implement client install
+      throw new UnsupportedOperationException("Client install not yet " +
+          "supported");
     } else {
       throw new BadCommandArgumentsException(
           "Only install, keystore, and truststore commands are supported for the client.\n"
@@ -1134,66 +1143,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
-  private int doClientInstall(ActionClientArgs clientInfo)
-      throws IOException, SliderException {
-
-    require(clientInfo.installLocation != null,
-          E_INVALID_INSTALL_LOCATION +"\n"
-          + CommonArgs.usage(serviceArgs, ACTION_CLIENT));
-    require(clientInfo.installLocation.exists(),
-        E_INSTALL_PATH_DOES_NOT_EXIST + ": " + clientInfo.installLocation.getAbsolutePath());
-
-    require(clientInfo.installLocation.isDirectory(),
-        E_INVALID_INSTALL_PATH + ": " + clientInfo.installLocation.getAbsolutePath());
-
-    File pkgFile;
-    File tmpDir = null;
-
-    require(isSet(clientInfo.packageURI) || isSet(clientInfo.name),
-        E_INVALID_APPLICATION_PACKAGE_LOCATION);
-    if (isSet(clientInfo.packageURI)) {
-      pkgFile = new File(clientInfo.packageURI);
-    } else {
-      Path appDirPath = sliderFileSystem.buildAppDefDirPath(clientInfo.name);
-      Path appDefPath = new Path(appDirPath, SliderKeys.DEFAULT_APP_PKG);
-      require(sliderFileSystem.isFile(appDefPath),
-          E_INVALID_APPLICATION_PACKAGE_LOCATION);
-      tmpDir = Files.createTempDir();
-      pkgFile = new File(tmpDir, SliderKeys.DEFAULT_APP_PKG);
-      sliderFileSystem.copyHdfsFileToLocal(appDefPath, pkgFile);
-    }
-    require(pkgFile.isFile(),
-        E_UNABLE_TO_READ_SUPPLIED_PACKAGE_FILE + " at %s", pkgFile.getAbsolutePath());
-
-    JSONObject config = null;
-    if(clientInfo.clientConfig != null) {
-      try {
-        byte[] encoded = Files.toByteArray(clientInfo.clientConfig);
-        config = new JSONObject(new String(encoded, Charset.defaultCharset()));
-      } catch (JSONException jsonEx) {
-        log.error("Unable to read supplied configuration at {}: {}",
-            clientInfo.clientConfig, jsonEx);
-        log.debug("Unable to read supplied configuration at {}: {}",
-            clientInfo.clientConfig, jsonEx, jsonEx);
-        throw new BadConfigException(E_MUST_BE_A_VALID_JSON_FILE, jsonEx);
-      }
-    }
-
-    // TODO handle client install
-    // Only INSTALL is supported
-    //    ClientProvider
-    //        provider = createClientProvider(SliderProviderFactory.DEFAULT_CLUSTER_TYPE);
-    //    provider.processClientOperation(sliderFileSystem,
-    //        getRegistryOperations(),
-    //        getConfig(),
-    //        "INSTALL",
-    //        clientInfo.installLocation,
-    //        pkgFile,
-    //        config,
-    //        clientInfo.name);
-    return EXIT_SUCCESS;
-  }
-
   @Override
   public int actionUpdate(String clustername,
       AbstractClusterBuildingActionArgs buildInfo) throws
@@ -1802,23 +1751,24 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   public int actionStart(String appName, ActionThawArgs thaw)
       throws YarnException, IOException {
     validateClusterName(appName);
+    Path appDir = checkAppExistOnHdfs(appName);
+    Application application = ServiceApiUtil.loadApplication(sliderFileSystem,
+        appName);
+    ServiceApiUtil.validateAndResolveApplication(application, sliderFileSystem);
     // see if it is actually running and bail out;
     verifyNoLiveApp(appName, "Thaw");
-    Path appDir = sliderFileSystem.buildClusterDirPath(appName);
-    Path appJson = new Path(appDir, appName + ".json");
-    Application application =
-        jsonSerDeser.load(sliderFileSystem.getFileSystem(), appJson);
-    submitApp(application);
+    ApplicationId appId = submitApp(application);
+    application.setId(appId.toString());
+    // write app definition on to hdfs
+    persistApp(appDir, application);
     return 0;
   }
 
   public Map<String, Long> flex(String appName, Map<String, Long>
       componentCounts) throws YarnException, IOException {
     validateClusterName(appName);
-    Path appDir = sliderFileSystem.buildClusterDirPath(appName);
-    Path appJson = new Path(appDir, appName + ".json");
-    Application persistedApp =
-        jsonSerDeser.load(sliderFileSystem.getFileSystem(), appJson);
+    Application persistedApp = ServiceApiUtil.loadApplication(sliderFileSystem,
+        appName);
     Map<String, Long> original = new HashMap<>(componentCounts.size());
     for (Component persistedComp : persistedApp.getComponents()) {
       String name = persistedComp.getName();
@@ -1833,7 +1783,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
           + " do not exist in app definition.");
     }
     jsonSerDeser
-        .save(sliderFileSystem.getFileSystem(), appJson, persistedApp, true);
+        .save(sliderFileSystem.getFileSystem(), ServiceApiUtil.getAppJsonPath(
+            sliderFileSystem, appName), persistedApp, true);
     log.info("Updated app definition file for components " + componentCounts
         .keySet());
 
@@ -2705,6 +2656,12 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         yarnClient);
   }
 
+  @VisibleForTesting
+  public ApplicationReport monitorAppToRunning(Duration duration)
+      throws YarnException, IOException {
+    return yarnClient.monitorAppToState(applicationId, YarnApplicationState
+        .RUNNING, duration);
+  }
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
index 4839395..306bd99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderYarnClientImpl.java
@@ -245,4 +245,65 @@ public class SliderYarnClientImpl extends YarnClientImpl {
     }
     return results;
   }
+
+  /**
+   * Monitor the submitted application for reaching the requested state.
+   * Will also report if the app reaches a later state (failed, killed, etc)
+   * Kills the application if duration != null and the time limit expires.
+   * @param appId Application Id of application to be monitored
+   * @param duration how long to wait — must be more than 0
+   * @param desiredState desired state.
+   * @return the application report, or null on a timeout
+   * @throws YarnException
+   * @throws IOException
+   */
+  public ApplicationReport monitorAppToState(
+      ApplicationId appId, YarnApplicationState desiredState, Duration duration)
+      throws YarnException, IOException {
+
+    if (appId == null) {
+      throw new BadCommandArgumentsException("null application ID");
+    }
+    if (duration.limit <= 0) {
+      throw new BadCommandArgumentsException("Invalid monitoring duration");
+    }
+    log.debug("Waiting {} millis for app to reach state {} ",
+        duration.limit,
+        desiredState);
+    duration.start();
+    try {
+      while (true) {
+        // Get application report for the appId we are interested in
+
+        ApplicationReport r = getApplicationReport(appId);
+
+        log.debug("queried status is\n{}",
+            new SliderUtils.OnDemandReportStringifier(r));
+
+        YarnApplicationState state = r.getYarnApplicationState();
+        if (state.ordinal() >= desiredState.ordinal()) {
+          log.debug("App in desired state (or higher) :{}", state);
+          return r;
+        }
+        if (duration.getLimitExceeded()) {
+          log.debug(
+              "Wait limit of {} millis to get to state {}, exceeded; app " +
+                  "status\n {}",
+              duration.limit,
+              desiredState,
+              new SliderUtils.OnDemandReportStringifier(r));
+          return null;
+        }
+
+        // sleep 1s.
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException ignored) {
+          log.debug("Thread sleep in monitoring loop interrupted");
+        }
+      }
+    } finally {
+      duration.close();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
index 734fec5..865562e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -18,10 +18,6 @@
 
 package org.apache.slider.common;
 
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
 /**
  * Keys and various constants for Slider
  */
@@ -73,17 +69,10 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String APP_TYPE = "org-apache-slider";
 
   /**
-   * Key for component type. This MUST NOT be set in app_config/global {@value}
-   */
-  String COMPONENT_TYPE_KEY = "site.global.component_type";
-  /**
    * A component type for an external app that has been predefined using the
    * slider build command
    */
-  String COMPONENT_TYPE_EXTERNAL_APP = "external_app";
   String COMPONENT_SEPARATOR = "-";
-  List<String> COMPONENT_KEYS_TO_SKIP = Collections.unmodifiableList(Arrays
-      .asList("zookeeper.", "env.MALLOC_ARENA_MAX", "site.fs.", "site.dfs."));
 
   /**
    * A component type for a client component
@@ -91,48 +80,19 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String COMPONENT_TYPE_CLIENT = "client";
 
   /**
-   * Key for application version. This must be set in app_config/global {@value}
+   * Key for application version.
    */
-  String APP_VERSION = "site.global.app_version";
   String APP_VERSION_UNKNOWN = "awaiting heartbeat...";
 
   /**
    * Keys for application container specific properties, like release timeout
    */
   String APP_CONTAINER_RELEASE_TIMEOUT = "site.global.app_container.release_timeout_secs";
-  int APP_CONTAINER_HEARTBEAT_INTERVAL_SEC = 10; // look for HEARTBEAT_IDDLE_INTERVAL_SEC
 
   /**
-   * JVM arg to force IPv4  {@value}
-   */
-  String JVM_ENABLE_ASSERTIONS = "-ea";
-  
-  /**
-   * JVM arg enable JVM system/runtime {@value}
+   * Subdirectories of HDFS cluster dir.
    */
-  String JVM_ENABLE_SYSTEM_ASSERTIONS = "-esa";
-
-  /**
-   * JVM arg to force IPv4  {@value}
-   */
-  String JVM_FORCE_IPV4 = "-Djava.net.preferIPv4Stack=true";
-
-  /**
-   * JVM arg to go headless  {@value}
-   */
-
-  String JVM_JAVA_HEADLESS = "-Djava.awt.headless=true";
-
-  /**
-   * This is the name of the dir/subdir containing
-   * the hbase conf that is propagated via YARN
-   *  {@value}
-   */
-  String PROPAGATED_CONF_DIR_NAME = "propagatedconf";
-  String INFRA_DIR_NAME = "infra";
-  String GENERATED_CONF_DIR_NAME = "generated";
-  String SNAPSHOT_CONF_DIR_NAME = "snapshot";
-  String DATA_DIR_NAME = "database";
+  String DATA_DIR_NAME = "data";
   String HISTORY_DIR_NAME = "history";
   String HISTORY_FILENAME_SUFFIX = "json";
   String HISTORY_FILENAME_PREFIX = "rolehistory-";
@@ -159,14 +119,6 @@ public interface SliderKeys extends SliderXmlConfKeys {
 
   String CLUSTER_DIRECTORY = "cluster";
 
-  String PACKAGE_DIRECTORY = "package";
-
-  /**
-   * JVM property to define the slider configuration directory;
-   * this is set by the slider script: {@value}
-   */
-  String PROPERTY_CONF_DIR = "slider.confdir";
-
   /**
    * JVM property to define the slider lib directory;
    * this is set by the slider script: {@value}
@@ -184,11 +136,6 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String LOG4J_SERVER_PROP_FILENAME = "slideram-log4j.properties";
 
   /**
-   * Standard log4j file name  : {@value}
-   */
-  String LOG4J_PROP_FILENAME = "log4j.properties";
-
-  /**
    * Log4j sysprop to name the resource :{@value}
    */
   String SYSPROP_LOG4J_CONFIGURATION = "log4j.configuration";
@@ -209,9 +156,7 @@ public interface SliderKeys extends SliderXmlConfKeys {
    */
   String SLIDER_SERVER_XML = "slider-server.xml";
 
-  String TMP_LOGDIR_PREFIX = "/tmp/slider-";
   String TMP_DIR_PREFIX = "tmp";
-  String AM_DIR_PREFIX = "appmaster";
 
   /**
    * Store the default app definition, e.g. metainfo file or content of a folder
@@ -223,53 +168,11 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String ADDONS_DIR = "addons";
 
   String SLIDER_JAR = "slider-core.jar";
-  String JCOMMANDER_JAR = "jcommander.jar";
-  String GSON_JAR = "gson.jar";
-  String DEFAULT_APP_PKG = "appPkg.zip";
 
-  String DEFAULT_JVM_HEAP = "256M";
-  int DEFAULT_YARN_MEMORY = 256;
   String STDOUT_AM = "slider-out.txt";
   String STDERR_AM = "slider-err.txt";
-  String DEFAULT_GC_OPTS = "";
 
   String HADOOP_USER_NAME = "HADOOP_USER_NAME";
-  String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
-  String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE";
-
-  boolean PROPAGATE_RESOURCE_OPTION = true;
-
-  /**
-   * Security associated keys.
-   */
-  String SECURITY_DIR = "security";
-  String CRT_FILE_NAME = "ca.crt";
-  String CSR_FILE_NAME = "ca.csr";
-  String KEY_FILE_NAME = "ca.key";
-  String KEYSTORE_FILE_NAME = "keystore.p12";
-  String CRT_PASS_FILE_NAME = "pass.txt";
-  String PASS_LEN = "50";
-
-  String COMP_STORES_REQUIRED_KEY =
-      "slider.component.security.stores.required";
-  String COMP_KEYSTORE_PASSWORD_PROPERTY_KEY =
-      "slider.component.keystore.password.property";
-  String COMP_KEYSTORE_PASSWORD_ALIAS_KEY =
-      "slider.component.keystore.credential.alias.property";
-  String COMP_KEYSTORE_PASSWORD_ALIAS_DEFAULT =
-      "component.keystore.credential.alias";
-  String COMP_TRUSTSTORE_PASSWORD_PROPERTY_KEY =
-      "slider.component.truststore.password.property";
-  String COMP_TRUSTSTORE_PASSWORD_ALIAS_KEY =
-      "slider.component.truststore.credential.alias.property";
-  String COMP_TRUSTSTORE_PASSWORD_ALIAS_DEFAULT =
-      "component.truststore.credential.alias";
-
-  /**
-   * Python specific
-   */
-  String PYTHONPATH = "PYTHONPATH";
-
 
   /**
    * Name of the AM filter to use: {@value}
@@ -277,34 +180,11 @@ public interface SliderKeys extends SliderXmlConfKeys {
   String AM_FILTER_NAME =
       "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer";
 
-  /**
-   * Allowed port range. This MUST be set in app_conf/global.
-   * {@value}
-   */
-  String KEY_ALLOWED_PORT_RANGE = "site.global.slider.allowed.ports";
-
-  /**
-   * env var for custom JVM options.
-   */
-  String SLIDER_JVM_OPTS = "SLIDER_JVM_OPTS";
-
-  String SLIDER_CLASSPATH_EXTRA = "SLIDER_CLASSPATH_EXTRA";
   String YARN_CONTAINER_PATH = "/node/container/";
 
-  String GLOBAL_CONFIG_TAG = "global";
-  String SYSTEM_CONFIGS = "system_configs";
-  String JAVA_HOME = "java_home";
-  String TWO_WAY_SSL_ENABLED = "ssl.server.client.auth";
-  String INFRA_RUN_SECURITY_DIR = "infra/run/security/";
-  String CERT_FILE_LOCALIZATION_PATH = INFRA_RUN_SECURITY_DIR + "ca.crt";
-
-  String AM_CONFIG_GENERATION = "am.config.generation";
   String APP_CONF_DIR = "conf";
 
-  String APP_RESOURCES = "application.resources";
-  String APP_RESOURCES_DIR = "app/resources";
-
-  String APP_INSTALL_DIR = "app/install";
+  String APP_LIB_DIR = "lib";
 
   String OUT_FILE = "stdout.txt";
   String ERR_FILE = "stderr.txt";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
index 0c249d0..588d330 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
@@ -20,7 +20,6 @@ package org.apache.slider.common.tools;
 
 import com.google.common.base.Preconditions;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -43,20 +42,15 @@ import org.apache.slider.core.exceptions.ErrorStrings;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
 import org.apache.slider.core.persist.Filenames;
-import org.apache.slider.core.persist.InstancePaths;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Enumeration;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
 
 import static org.apache.slider.common.SliderXmlConfKeys.CLUSTER_DIRECTORY_PERMISSIONS;
 import static org.apache.slider.common.SliderXmlConfKeys.DEFAULT_CLUSTER_DIRECTORY_PERMISSIONS;
@@ -153,34 +147,6 @@ public class CoreFileSystem {
   }
 
   /**
-   * Build up the path string for package install location -no attempt to
-   * create the directory is made
-   *
-   * @return the path for persistent app package
-   */
-  public Path buildPackageDirPath(String packageName, String packageVersion) {
-    Preconditions.checkNotNull(packageName);
-    Path path = getBaseApplicationPath();
-    path = new Path(path, SliderKeys.PACKAGE_DIRECTORY + "/" + packageName);
-    if (SliderUtils.isSet(packageVersion)) {
-      path = new Path(path, packageVersion);
-    }
-    return path;
-  }
-
-  /**
-   * Build up the path string for package install location -no attempt to
-   * create the directory is made
-   *
-   * @return the path for persistent app package
-   */
-  public Path buildClusterSecurityDirPath(String clusterName) {
-    Preconditions.checkNotNull(clusterName);
-    Path path = buildClusterDirPath(clusterName);
-    return new Path(path, SliderKeys.SECURITY_DIR);
-  }
-
-  /**
    * Build up the path string for keytab install location -no attempt to
    * create the directory is made
    *
@@ -390,36 +356,6 @@ public class CoreFileSystem {
   }
 
   /**
-   * Create the application-instance specific temporary directory
-   * in the DFS
-   *
-   * @param clustername name of the cluster
-   * @param subdir       application ID
-   * @return the path; this directory will already have been created
-   */
-  public Path createAppInstanceTempPath(String clustername, String subdir)
-      throws IOException {
-    Path tmp = getTempPathForCluster(clustername);
-    Path instancePath = new Path(tmp, subdir);
-    fileSystem.mkdirs(instancePath);
-    return instancePath;
-  }
-
-  /**
-   * Create the application-instance specific temporary directory
-   * in the DFS
-   *
-   * @param clustername name of the cluster
-   * @return the path; this directory will already have been deleted
-   */
-  public Path purgeAppInstanceTempFiles(String clustername) throws
-          IOException {
-    Path tmp = getTempPathForCluster(clustername);
-    fileSystem.delete(tmp, true);
-    return tmp;
-  }
-
-  /**
    * Get the base path
    *
    * @return the base path optionally configured by 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index bc8e139..6dc51ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -48,8 +48,6 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.Slider;
 import org.apache.slider.api.RoleKeys;
-import org.apache.slider.api.resource.Application;
-import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
@@ -2540,14 +2538,4 @@ public final class SliderUtils {
     long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
     return totalMinutes * 60 + seconds;
   }
-
-  public static void resolve(Application application) {
-    org.apache.slider.api.resource.Configuration global = application
-        .getConfiguration();
-    for (Component component : application.getComponents()) {
-      mergeMapsIgnoreDuplicateKeys(component.getConfiguration().getProperties(),
-          global.getProperties());
-    }
-    // TODO merge other information to components
-  }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/InstancePaths.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/InstancePaths.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/InstancePaths.java
deleted file mode 100644
index 3505ac3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/InstancePaths.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.common.SliderKeys;
-
-/**
- * Build up all the paths of an instance relative to the supplied instance
- * directory.
- */
-public class InstancePaths {
-
-  public final Path instanceDir;
-  public final Path snapshotConfPath;
-  public final Path generatedConfPath;
-  public final Path historyPath;
-  public final Path dataPath;
-  public final Path tmpPath;
-  public final Path tmpPathAM;
-  public final Path appDefPath;
-  public final Path addonsPath;
-
-  public InstancePaths(Path instanceDir) {
-    this.instanceDir = instanceDir;
-    snapshotConfPath =
-      new Path(instanceDir, SliderKeys.SNAPSHOT_CONF_DIR_NAME);
-    generatedConfPath =
-      new Path(instanceDir, SliderKeys.GENERATED_CONF_DIR_NAME);
-    historyPath = new Path(instanceDir, SliderKeys.HISTORY_DIR_NAME);
-    dataPath = new Path(instanceDir, SliderKeys.DATA_DIR_NAME);
-    tmpPath = new Path(instanceDir, SliderKeys.TMP_DIR_PREFIX);
-    tmpPathAM = new Path(tmpPath, SliderKeys.AM_DIR_PREFIX);
-    appDefPath = new Path(tmpPath, SliderKeys.APP_DEF_DIR);
-    addonsPath = new Path(tmpPath, SliderKeys.ADDONS_DIR);
-  }
-
-  @Override
-  public String toString() {
-    return "instance at " + instanceDir;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
index 185dcd4..ea92ff7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
@@ -18,8 +18,10 @@
 
 package org.apache.slider.providers;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.ConfigFile;
@@ -30,6 +32,7 @@ import org.codehaus.jettison.json.JSONObject;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Paths;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -76,12 +79,50 @@ public abstract class AbstractClientProvider {
   /**
    * Validate the config files.
    * @param configFiles config file list
-   * @param fileSystem file system
+   * @param fs file system
    */
-  public void validateConfigFiles(List<ConfigFile> configFiles, FileSystem
-      fileSystem) throws IOException {
-    for (ConfigFile configFile : configFiles) {
-      validateConfigFile(configFile, fileSystem);
+  public void validateConfigFiles(List<ConfigFile> configFiles,
+      FileSystem fs) throws IOException {
+    Set<String> destFileSet = new HashSet<>();
+
+    for (ConfigFile file : configFiles) {
+      if (file.getType() == null) {
+        throw new IllegalArgumentException("File type is empty");
+      }
+
+      if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE) && StringUtils
+          .isEmpty(file.getSrcFile())) {
+        throw new IllegalArgumentException(
+            "Src_file is empty for " + ConfigFile.TypeEnum.TEMPLATE);
+
+      }
+      if (!StringUtils.isEmpty(file.getSrcFile())) {
+        Path p = new Path(file.getSrcFile());
+        if (!fs.exists(p)) {
+          throw new IllegalArgumentException(
+              "Src_file does not exist for config file: " + file
+                  .getSrcFile());
+        }
+      }
+
+      if (StringUtils.isEmpty(file.getDestFile())) {
+        throw new IllegalArgumentException("Dest_file is empty.");
+      }
+
+      if (destFileSet.contains(file.getDestFile())) {
+        throw new IllegalArgumentException(
+            "Duplicated ConfigFile exists: " + file.getDestFile());
+      }
+      destFileSet.add(file.getDestFile());
+
+      java.nio.file.Path destPath = Paths.get(file.getDestFile());
+      if (!destPath.isAbsolute() && destPath.getNameCount() > 1) {
+        throw new IllegalArgumentException("Non-absolute dest_file has more " +
+            "than one path element");
+      }
+
+      // provider-specific validation
+      validateConfigFile(file, fs);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
index 9c52643..5ecc374 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/SliderProviderFactory.java
@@ -22,7 +22,6 @@ import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.providers.docker.DockerProviderFactory;
 import org.apache.slider.providers.tarball.TarballProviderFactory;
-import org.apache.slider.util.RestApiErrorMessages;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +29,7 @@ import org.slf4j.LoggerFactory;
  * Base class for factories.
  */
 public abstract class SliderProviderFactory {
-  protected static final Logger log =
+  protected static final Logger LOG =
       LoggerFactory.getLogger(SliderProviderFactory.class);
 
   protected SliderProviderFactory() {}
@@ -58,10 +57,10 @@ public abstract class SliderProviderFactory {
   public static synchronized SliderProviderFactory createSliderProviderFactory(
       Artifact artifact) {
     if (artifact == null || artifact.getType() == null) {
-      log.info("Loading service provider type default");
+      LOG.debug("Loading service provider type default");
       return DefaultProviderFactory.getInstance();
     }
-    log.info("Loading service provider type {}", artifact.getType());
+    LOG.debug("Loading service provider type {}", artifact.getType());
     switch (artifact.getType()) {
       // TODO add handling for custom types?
       // TODO handle application
@@ -70,8 +69,9 @@ public abstract class SliderProviderFactory {
       case TARBALL:
         return TarballProviderFactory.getInstance();
       default:
-        throw new IllegalArgumentException(
-            RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
+        throw new IllegalArgumentException(String.format("Resolution error, " +
+                "%s should not be passed to createSliderProviderFactory",
+            artifact.getType()));
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
index 65a55f0..9dd3499 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/tarball/TarballProviderService.java
@@ -45,6 +45,6 @@ public class TarballProviderService extends AbstractProviderService {
     LocalResourceType type = LocalResourceType.ARCHIVE;
     LocalResource packageResource = fileSystem.createAmResource(
         artifact, type);
-    launcher.addLocalResource(APP_INSTALL_DIR, packageResource);
+    launcher.addLocalResource(APP_LIB_DIR, packageResource);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 84dde08..0c3fcea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -107,7 +107,6 @@ import org.apache.slider.core.main.ExitCodeProvider;
 import org.apache.slider.core.main.LauncherExitCodes;
 import org.apache.slider.core.main.RunService;
 import org.apache.slider.core.main.ServiceLauncher;
-import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.core.registry.info.CustomRegistryConstants;
 import org.apache.slider.providers.ProviderCompleted;
 import org.apache.slider.providers.ProviderService;
@@ -157,7 +156,7 @@ import org.apache.slider.server.services.workflow.ServiceThreadFactory;
 import org.apache.slider.server.services.workflow.WorkflowExecutorService;
 import org.apache.slider.server.services.workflow.WorkflowRpcService;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
-import org.codehaus.jackson.map.PropertyNamingStrategy;
+import org.apache.slider.util.ServiceApiUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -389,9 +388,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   private boolean securityEnabled;
   private ContentCache contentCache;
-  private static final JsonSerDeser<Application> jsonSerDeser =
-      new JsonSerDeser<Application>(Application.class,
-          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
 
   /**
    * resource limits
@@ -590,9 +586,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     Path appDir = new Path((serviceArgs.getAppDefDir()));
     SliderFileSystem fs = getClusterFS();
     fs.setAppDir(appDir);
-    Path appJson = new Path(appDir, appName + ".json");
-    log.info("Loading application definition from " + appJson);
-    application = jsonSerDeser.load(fs.getFileSystem(), appJson);
+    application = ServiceApiUtil.loadApplication(fs, appName);
     log.info("Application Json: " + application);
     stateForProviders.setApplicationName(appName);
     Configuration serviceConf = getConfig();
@@ -821,7 +815,8 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       binding.releaseSelector =  new MostRecentContainerReleaseSelector();
       binding.nodeReports = nodeReports;
       binding.application = application;
-      binding.serviceHdfsDir = fs.buildClusterDirPath(appName).toString();
+      binding.serviceHdfsDir = new Path(fs.buildClusterDirPath(appName),
+          SliderKeys.DATA_DIR_NAME).toString();
       appState.buildInstance(binding);
 
       // build up environment variables that the AM wants set in every container
@@ -874,11 +869,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     scheduleFailureWindowResets(application.getConfiguration());
     scheduleEscalation(application.getConfiguration());
 
-    for (Component component : application.getComponents()) {
-      // Merge app-level configuration into component level configuration
-      component.getConfiguration().mergeFrom(application.getConfiguration());
-    }
-
     try {
       // schedule YARN Registry registration
       queue(new ActionRegisterServiceInstance(appName, appid, application));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
index ac89ed8..676db82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java
@@ -21,7 +21,7 @@ public interface RestApiErrorMessages {
   String ERROR_APPLICATION_NAME_INVALID =
       "Application name is either empty or not provided";
   String ERROR_APPLICATION_NAME_INVALID_FORMAT =
-      "Application name is not valid - only lower case letters, digits,"
+      "Application name %s is not valid - only lower case letters, digits,"
           + " underscore and hyphen are allowed";
 
   String ERROR_APPLICATION_NOT_RUNNING = "Application not running";
@@ -76,7 +76,7 @@ public interface RestApiErrorMessages {
   String ERROR_ABSENT_NUM_OF_INSTANCE =
       "Num of instances should appear either globally or per component";
   String ERROR_ABSENT_LAUNCH_COMMAND =
-      "launch command should appear if type is slider-zip or none";
+      "Launch_command is required when type is not DOCKER";
 
   String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
       + " component level, needs corresponding values set at application level";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
index d7c72a3..80a31c0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java
@@ -25,118 +25,158 @@ import org.apache.hadoop.fs.Path;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Artifact;
 import org.apache.slider.api.resource.Component;
-import org.apache.slider.api.resource.ConfigFile;
 import org.apache.slider.api.resource.Configuration;
 import org.apache.slider.api.resource.Resource;
+import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.providers.AbstractClientProvider;
+import org.apache.slider.providers.SliderProviderFactory;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.nio.file.Paths;
+import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
 public class ServiceApiUtil {
-  private static final Logger log =
+  private static final Logger LOG =
       LoggerFactory.getLogger(ServiceApiUtil.class);
+  private static JsonSerDeser<Application> jsonSerDeser =
+      new JsonSerDeser<>(Application.class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
+
+  @VisibleForTesting
+  public static void setJsonSerDeser(JsonSerDeser jsd) {
+    jsonSerDeser = jsd;
+  }
+
   @VisibleForTesting
-  public static void validateApplicationPayload(Application application,
-      FileSystem fs) throws IOException {
+  public static void validateAndResolveApplication(Application application,
+      SliderFileSystem fs) throws IOException {
     if (StringUtils.isEmpty(application.getName())) {
       throw new IllegalArgumentException(
           RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
     }
     if (!SliderUtils.isClusternameValid(application.getName())) {
-      throw new IllegalArgumentException(
-          RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT);
+      throw new IllegalArgumentException(String.format(
+          RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT,
+          application.getName()));
     }
 
     // If the application has no components do top-level checks
     if (!hasComponent(application)) {
-      // artifact
-      if (application.getArtifact() == null) {
-        throw new IllegalArgumentException(
-            RestApiErrorMessages.ERROR_ARTIFACT_INVALID);
-      }
-      if (StringUtils.isEmpty(application.getArtifact().getId())) {
-        throw new IllegalArgumentException(
-            RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
-      }
-
-      // If artifact is of type APPLICATION, add a slider specific property
-      if (application.getArtifact().getType()
-          == Artifact.TypeEnum.APPLICATION) {
-        if (application.getConfiguration() == null) {
-          application.setConfiguration(new Configuration());
+      // If artifact is of type APPLICATION, read other application components
+      if (application.getArtifact() != null && application.getArtifact()
+          .getType() == Artifact.TypeEnum.APPLICATION) {
+        if (StringUtils.isEmpty(application.getArtifact().getId())) {
+          throw new IllegalArgumentException(
+              RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
         }
+        Application otherApplication = loadApplication(fs,
+            application.getArtifact().getId());
+        application.setComponents(otherApplication.getComponents());
+        application.setArtifact(null);
+        SliderUtils.mergeMapsIgnoreDuplicateKeys(application.getQuicklinks(),
+            otherApplication.getQuicklinks());
+      } else {
+        // Since it is a simple app with no components, create a default
+        // component
+        Component comp = createDefaultComponent(application);
+        validateComponent(comp, fs.getFileSystem());
+        application.getComponents().add(comp);
+        if (application.getLifetime() == null) {
+          application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
+        }
+        return;
       }
-      // resource
-      validateApplicationResource(application.getResource(), null,
-          application.getArtifact().getType());
+    }
 
-      // container size
-      if (application.getNumberOfContainers() == null
-          || application.getNumberOfContainers() < 0) {
-        throw new IllegalArgumentException(
-            RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID + ": "
-                + application.getNumberOfContainers());
+    // Validate there are no component name collisions (collisions are not
+    // currently supported) and add any components from external applications
+    // TODO allow name collisions? see AppState#roles
+    // TODO or add prefix to external component names?
+    Configuration globalConf = application.getConfiguration();
+    Set<String> componentNames = new HashSet<>();
+    List<Component> componentsToRemove = new ArrayList<>();
+    List<Component> componentsToAdd = new ArrayList<>();
+    for (Component comp : application.getComponents()) {
+      if (componentNames.contains(comp.getName())) {
+        throw new IllegalArgumentException("Component name collision: " +
+            comp.getName());
       }
-      validateConfigFile(application.getConfiguration().getFiles(), fs);
-      // Since it is a simple app with no components, create a default component
-      application.getComponents().add(createDefaultComponent(application));
-    } else {
-      // If the application has components, then run checks for each component.
-      // Let global values take effect if component level values are not
-      // provided.
-      Artifact globalArtifact = application.getArtifact();
-      Resource globalResource = application.getResource();
-      Long globalNumberOfContainers = application.getNumberOfContainers();
-      for (Component comp : application.getComponents()) {
-        // artifact
-        if (comp.getArtifact() == null) {
-          comp.setArtifact(globalArtifact);
-        }
-        // If still null raise validation exception
-        if (comp.getArtifact() == null) {
-          throw new IllegalArgumentException(String
-              .format(RestApiErrorMessages.ERROR_ARTIFACT_FOR_COMP_INVALID,
-                  comp.getName()));
-        }
+      // If artifact is of type APPLICATION (which cannot be filled from
+      // global), read external application and add its components to this
+      // application
+      if (comp.getArtifact() != null && comp.getArtifact().getType() ==
+          Artifact.TypeEnum.APPLICATION) {
         if (StringUtils.isEmpty(comp.getArtifact().getId())) {
-          throw new IllegalArgumentException(String
-              .format(RestApiErrorMessages.ERROR_ARTIFACT_ID_FOR_COMP_INVALID,
-                  comp.getName()));
+          throw new IllegalArgumentException(
+              RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
         }
-
-        // If artifact is of type APPLICATION, add a slider specific property
-        if (comp.getArtifact().getType() == Artifact.TypeEnum.APPLICATION) {
-          if (comp.getConfiguration() == null) {
-            comp.setConfiguration(new Configuration());
+        LOG.info("Marking {} for removal", comp.getName());
+        componentsToRemove.add(comp);
+        List<Component> externalComponents = getApplicationComponents(fs,
+            comp.getArtifact().getId());
+        for (Component c : externalComponents) {
+          Component override = application.getComponent(c.getName());
+          if (override != null && override.getArtifact() == null) {
+            // allow properties from external components to be overridden /
+            // augmented by properties in this component, except for artifact
+            // which must be read from external component
+            override.mergeFrom(c);
+            LOG.info("Merging external component {} from external {}", c
+                .getName(), comp.getName());
+          } else {
+            if (componentNames.contains(c.getName())) {
+              throw new IllegalArgumentException("Component name collision: " +
+                  c.getName());
+            }
+            componentNames.add(c.getName());
+            componentsToAdd.add(c);
+            LOG.info("Adding component {} from external {}", c.getName(),
+                comp.getName());
           }
-          comp.setName(comp.getArtifact().getId());
-        }
-
-        // resource
-        if (comp.getResource() == null) {
-          comp.setResource(globalResource);
         }
-        validateApplicationResource(comp.getResource(), comp,
-            comp.getArtifact().getType());
+      } else {
+        // otherwise handle as a normal component
+        componentNames.add(comp.getName());
+        // configuration
+        comp.getConfiguration().mergeFrom(globalConf);
+      }
+    }
+    application.getComponents().removeAll(componentsToRemove);
+    application.getComponents().addAll(componentsToAdd);
 
-        // container count
-        if (comp.getNumberOfContainers() == null) {
-          comp.setNumberOfContainers(globalNumberOfContainers);
-        }
-        if (comp.getNumberOfContainers() == null
-            || comp.getNumberOfContainers() < 0) {
-          throw new IllegalArgumentException(String.format(
-              RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID
-                  + ": " + comp.getNumberOfContainers(), comp.getName()));
-        }
-        validateConfigFile(comp.getConfiguration().getFiles(), fs);
+    // Validate components and let global values take effect if component level
+    // values are not provided
+    Artifact globalArtifact = application.getArtifact();
+    Resource globalResource = application.getResource();
+    Long globalNumberOfContainers = application.getNumberOfContainers();
+    String globalLaunchCommand = application.getLaunchCommand();
+    for (Component comp : application.getComponents()) {
+      // fill in global artifact unless it is type APPLICATION
+      if (comp.getArtifact() == null && application.getArtifact() != null
+          && application.getArtifact().getType() != Artifact.TypeEnum
+          .APPLICATION) {
+        comp.setArtifact(globalArtifact);
+      }
+      // fill in global resource
+      if (comp.getResource() == null) {
+        comp.setResource(globalResource);
       }
+      // fill in global container count
+      if (comp.getNumberOfContainers() == null) {
+        comp.setNumberOfContainers(globalNumberOfContainers);
+      }
+      // fill in global launch command
+      if (comp.getLaunchCommand() == null) {
+        comp.setLaunchCommand(globalLaunchCommand);
+      }
+      validateComponent(comp, fs.getFileSystem());
     }
 
     // Application lifetime if not specified, is set to unlimited lifetime
@@ -145,52 +185,54 @@ public class ServiceApiUtil {
     }
   }
 
-  // 1) Verify the src_file exists and non-empty for template
-  // 2) dest_file is absolute path
-  private static void validateConfigFile(List<ConfigFile> list, FileSystem fs)
+  public static void validateComponent(Component comp, FileSystem fs)
       throws IOException {
-    Set<String> destFileSet = new HashSet<>();
+    AbstractClientProvider compClientProvider = SliderProviderFactory
+        .getClientProvider(comp.getArtifact());
+    compClientProvider.validateArtifact(comp.getArtifact(), fs);
 
-    for (ConfigFile file : list) {
-      if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE) && StringUtils
-          .isEmpty(file.getSrcFile())) {
-        throw new IllegalArgumentException(
-            "Src_file is empty for " + ConfigFile.TypeEnum.TEMPLATE);
+    if (comp.getLaunchCommand() == null && (comp.getArtifact() == null || comp
+        .getArtifact().getType() != Artifact.TypeEnum.DOCKER)) {
+      throw new IllegalArgumentException(RestApiErrorMessages
+          .ERROR_ABSENT_LAUNCH_COMMAND);
+    }
 
-      }
-      if (!StringUtils.isEmpty(file.getSrcFile())) {
-        Path p = new Path(file.getSrcFile());
-        if (!fs.exists(p)) {
-          throw new IllegalArgumentException(
-              "Src_file does not exist for config file: " + file
-                  .getSrcFile());
-        }
-      }
+    validateApplicationResource(comp.getResource(), comp);
 
-      if (StringUtils.isEmpty(file.getDestFile())) {
-        throw new IllegalArgumentException("Dest_file is empty.");
-      }
-      // validate dest_file is absolute
-      if (!Paths.get(file.getDestFile()).isAbsolute()) {
-        throw new IllegalArgumentException(
-            "Dest_file must be absolute path: " + file.getDestFile());
-      }
-
-      if (destFileSet.contains(file.getDestFile())) {
-        throw new IllegalArgumentException(
-            "Duplicated ConfigFile exists: " + file.getDestFile());
-      }
-      destFileSet.add(file.getDestFile());
+    if (comp.getNumberOfContainers() == null
+        || comp.getNumberOfContainers() < 0) {
+      throw new IllegalArgumentException(String.format(
+          RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID
+              + ": " + comp.getNumberOfContainers(), comp.getName()));
     }
+    compClientProvider.validateConfigFiles(comp.getConfiguration()
+        .getFiles(), fs);
+  }
+
+  @VisibleForTesting
+  public static List<Component> getApplicationComponents(SliderFileSystem
+      fs, String appName) throws IOException {
+    return loadApplication(fs, appName).getComponents();
   }
 
+  public static Application loadApplication(SliderFileSystem fs, String
+      appName) throws IOException {
+    Path appJson = getAppJsonPath(fs, appName);
+    LOG.info("Loading application definition from " + appJson);
+    Application externalApplication = jsonSerDeser.load(fs.getFileSystem(),
+        appJson);
+    return externalApplication;
+  }
+
+  public static Path getAppJsonPath(SliderFileSystem fs, String appName) {
+    Path appDir = fs.buildClusterDirPath(appName);
+    Path appJson = new Path(appDir, appName + ".json");
+    return appJson;
+  }
 
   private static void validateApplicationResource(Resource resource,
-      Component comp, Artifact.TypeEnum artifactType) {
+      Component comp) {
     // Only apps/components of type APPLICATION can skip resource requirement
-    if (resource == null && artifactType == Artifact.TypeEnum.APPLICATION) {
-      return;
-    }
     if (resource == null) {
       throw new IllegalArgumentException(
           comp == null ? RestApiErrorMessages.ERROR_RESOURCE_INVALID : String
@@ -255,6 +297,7 @@ public class ServiceApiUtil {
     comp.setResource(app.getResource());
     comp.setNumberOfContainers(app.getNumberOfContainers());
     comp.setLaunchCommand(app.getLaunchCommand());
+    comp.setConfiguration(app.getConfiguration());
     return comp;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e57dddc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
index 07d8c10..59ccda7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
@@ -34,6 +34,7 @@ import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.main.ServiceLauncher;
 import org.apache.slider.utils.SliderTestBase;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -50,6 +51,7 @@ import java.util.UUID;
 public class TestKeytabCommandOptions extends SliderTestBase {
 
   private static SliderFileSystem testFileSystem;
+  private File testFolderDir;
 
   @Before
   public void setupFilesystem() throws IOException {
@@ -57,11 +59,18 @@ public class TestKeytabCommandOptions extends SliderTestBase {
     YarnConfiguration configuration = SliderUtils.createConfiguration();
     fileSystem.setConf(configuration);
     testFileSystem = new SliderFileSystem(fileSystem, configuration);
-    File testFolderDir = new File(testFileSystem
+    testFolderDir = new File(testFileSystem
         .buildKeytabInstallationDirPath("").toUri().getPath());
     FileUtils.deleteDirectory(testFolderDir);
   }
 
+  @After
+  public void cleanup() throws IOException {
+    if (testFolderDir != null && testFolderDir.exists()) {
+      FileUtils.deleteDirectory(testFolderDir);
+    }
+  }
+
   @Test
   public void testInstallKeytab() throws Throwable {
     // create a mock keytab file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
index 86d87ac..8b88c28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
@@ -22,7 +22,6 @@ import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.core.conf.AggregateConf;
 import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderRole;
@@ -30,14 +29,10 @@ import org.apache.slider.providers.ProviderUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 
-import static org.apache.slider.providers.docker.DockerKeys.DOCKER_IMAGE;
-
 public class DockerClientProvider extends AbstractClientProvider
     implements SliderKeys {
 
@@ -64,35 +59,7 @@ public class DockerClientProvider extends AbstractClientProvider
   public void validateInstanceDefinition(AggregateConf instanceDefinition,
       SliderFileSystem fs) throws SliderException {
     super.validateInstanceDefinition(instanceDefinition, fs);
-
-    ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
-    ConfTreeOperations resources = instanceDefinition.getResourceOperations();
-
-    for (String roleGroup : resources.getComponentNames()) {
-      if (roleGroup.equals(COMPONENT_AM)) {
-        continue;
-      }
-      if (appConf.getComponentOpt(roleGroup, DOCKER_IMAGE, null) == null &&
-          appConf.getGlobalOptions().get(DOCKER_IMAGE) == null) {
-        throw new BadConfigException("Property " + DOCKER_IMAGE + " not " +
-            "specified for " + roleGroup);
-      }
-
-      providerUtils.getPackages(roleGroup, appConf);
-
-      if (appConf.getComponentOptBool(roleGroup, AM_CONFIG_GENERATION, false)) {
-        // build and localize configuration files
-        Map<String, Map<String, String>> configurations =
-            providerUtils.buildConfigurations(appConf, appConf, null,
-                null, roleGroup, roleGroup, null);
-        try {
-          providerUtils.localizeConfigFiles(null, roleGroup, roleGroup, appConf,
-              configurations, null, fs, null);
-        } catch (IOException e) {
-          throw new BadConfigException(e.toString());
-        }
-      }
-    }
+    //TODO validate Application payload, part of that is already done in ApplicationApiService, need to do more
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index 63416cc..511f7bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -17,286 +17,129 @@
  */
 package org.apache.slider.providers.docker;
 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.slider.api.ClusterDescription;
-import org.apache.slider.api.ClusterNode;
-import org.apache.slider.api.OptionKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
+import org.apache.slider.api.resource.ContainerState;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.CommandLineBuilder;
 import org.apache.slider.core.launch.ContainerLauncher;
-import org.apache.slider.core.registry.docstore.ConfigFormat;
-import org.apache.slider.core.registry.docstore.ConfigUtils;
-import org.apache.slider.core.registry.docstore.ExportEntry;
-import org.apache.slider.providers.AbstractProviderService;
-import org.apache.slider.providers.MonitorDetail;
-import org.apache.slider.providers.ProviderCore;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
 import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.providers.ProviderService;
 import org.apache.slider.providers.ProviderUtils;
 import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
 import java.io.IOException;
-import java.net.URL;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Scanner;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Pattern;
 
-public class DockerProviderService extends AbstractProviderService implements
-    ProviderCore,
-    DockerKeys,
-    SliderKeys {
+public class DockerProviderService extends AbstractService
+    implements ProviderService, DockerKeys, SliderKeys {
 
   protected static final Logger log =
       LoggerFactory.getLogger(DockerProviderService.class);
   private static final ProviderUtils providerUtils = new ProviderUtils(log);
-  private static final String EXPORT_GROUP = "quicklinks";
-  private static final String APPLICATION_TAG = "application";
-  private static final String HOST_KEY_FORMAT = "${%s_HOST}";
-  private static final String IP_KEY_FORMAT = "${%s_IP}";
-  private static final String VARIABLE_INDICATOR = "${";
-
-  private String clusterName = null;
-  private SliderFileSystem fileSystem = null;
-
-  private final Map<String, Set<ExportEntry>> exportMap =
-      new ConcurrentHashMap<>();
+  private static final String QUICK_LINKS = "quicklinks";
+  protected StateAccessForProviders amState;
+  protected YarnRegistryViewForProviders yarnRegistry;
 
   protected DockerProviderService() {
     super("DockerProviderService");
   }
 
   @Override
-  public List<ProviderRole> getRoles() {
-    return Collections.emptyList();
-  }
-
-  @Override
-  public boolean isSupportedRole(String role) {
-    return true;
+  public void setAMState(StateAccessForProviders stateAccessor) {
+    this.amState = stateAccessor;
   }
 
   @Override
-  public void validateInstanceDefinition(AggregateConf instanceDefinition)
-      throws SliderException {
+  public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) {
+    this.yarnRegistry = yarnRegistry;
   }
 
-  private String getClusterName() {
-    if (SliderUtils.isUnset(clusterName)) {
-      clusterName = getAmState().getInternalsSnapshot().get(OptionKeys.APPLICATION_NAME);
-    }
-    return clusterName;
-  }
-
-  @Override
   public void buildContainerLaunchContext(ContainerLauncher launcher,
-      AggregateConf instanceDefinition, Container container,
-      ProviderRole providerRole, SliderFileSystem fileSystem,
-      Path generatedConfPath, MapOperations resourceComponent,
-      MapOperations appComponent, Path containerTmpDirPath)
+      Application application, Container container, ProviderRole providerRole,
+      SliderFileSystem fileSystem)
       throws IOException, SliderException {
 
     String roleName = providerRole.name;
     String roleGroup = providerRole.group;
 
-    log.info("Build launch context for Docker");
-    log.debug(instanceDefinition.toString());
-
-    ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
+    Component component = providerRole.component;
     launcher.setYarnDockerMode(true);
-    launcher.setDockerImage(appConf.getComponentOpt(roleGroup, DOCKER_IMAGE,
-        null));
-    launcher.setDockerNetwork(appConf.getComponentOpt(roleGroup, DOCKER_NETWORK,
-        DEFAULT_DOCKER_NETWORK));
-    launcher.setRunPrivilegedContainer(appConf.getComponentOptBool(roleGroup,
-        DOCKER_USE_PRIVILEGED, DEFAULT_DOCKER_USE_PRIVILEGED));
-
-    // Set the environment
-    Map<String, String> standardTokens = providerUtils.getStandardTokenMap(
-        getAmState().getAppConfSnapshot(), getAmState().getInternalsSnapshot(),
-        roleName, roleGroup, container.getId().toString(), getClusterName());
-    Map<String, String> replaceTokens = providerUtils.filterSiteOptions(
-            appConf.getComponent(roleGroup).options, standardTokens);
-    replaceTokens.putAll(standardTokens);
-    launcher.putEnv(SliderUtils.buildEnvMap(appComponent, replaceTokens));
-
-    String workDir = ApplicationConstants.Environment.PWD.$();
-    launcher.setEnv("WORK_DIR", workDir);
-    log.info("WORK_DIR set to {}", workDir);
-    String logDir = ApplicationConstants.LOG_DIR_EXPANSION_VAR;
-    launcher.setEnv("LOG_DIR", logDir);
-    log.info("LOG_DIR set to {}", logDir);
+    launcher.setDockerImage(component.getArtifact().getId());
+    launcher.setDockerNetwork(component.getConfiguration()
+        .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK));
+    launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer());
+
+    // Generate tokens (key-value pair) for config substitution.
+    Map<String, String> standardTokens = providerUtils
+        .getStandardTokenMap(application.getConfiguration(),
+            component.getConfiguration(), roleName, roleGroup,
+            container.getId().toString(), application.getName());
+    Map<String, String> tokensForSubstitution = providerUtils.substituteConfigs(
+            component.getConfiguration().getProperties(), standardTokens);
+
+    tokensForSubstitution.putAll(standardTokens);
+
+    // Set the environment variables
+    launcher.putEnv(SliderUtils
+        .buildEnvMap(component.getConfiguration(), tokensForSubstitution));
+    launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$());
+    launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
     if (System.getenv(HADOOP_USER_NAME) != null) {
       launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME));
     }
-    //add english env
     launcher.setEnv("LANG", "en_US.UTF-8");
     launcher.setEnv("LC_ALL", "en_US.UTF-8");
     launcher.setEnv("LANGUAGE", "en_US.UTF-8");
 
-    //local resources
-    providerUtils.localizePackages(launcher, fileSystem, appConf, roleGroup,
-        getClusterName());
-
-    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
-      providerUtils.localizeServiceKeytabs(launcher, instanceDefinition,
-          fileSystem, getClusterName());
+    for (Entry<String, String> entry : launcher.getEnv().entrySet()) {
+      tokensForSubstitution.put("${" + entry.getKey() + "}", entry.getValue());
     }
 
-    if (appComponent.getOptionBool(AM_CONFIG_GENERATION, false)) {
-      // build and localize configuration files
-      Map<String, Map<String, String>> configurations =
-          providerUtils.buildConfigurations(
-              instanceDefinition.getAppConfOperations(),
-              instanceDefinition.getInternalOperations(),
-              container.getId().toString(), getClusterName(),
-              roleName, roleGroup, getAmState());
-      providerUtils.localizeConfigFiles(launcher, roleName, roleGroup,
-          appConf, configurations, launcher.getEnv(), fileSystem,
-          getClusterName());
+    providerUtils.addRoleHostTokens(tokensForSubstitution, amState);
+
+    log.info("Token for substitution: " + tokensForSubstitution);
+
+    if (SliderUtils.isHadoopClusterSecure(getConfig())) {
+      //TODO localize key tabs, WHY is this code needed ? WHY DOES CONTAINER REQUIRE AM KEYTAB??
+      providerUtils.localizeServiceKeytabs(launcher, fileSystem, application);
     }
 
-    //add the configuration resources
-    launcher.addLocalResources(fileSystem.submitDirectory(
-        generatedConfPath,
-        PROPAGATED_CONF_DIR_NAME));
+    // create config file on hdfs and add local resource
+    providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem,
+        component, tokensForSubstitution, amState);
 
     CommandLineBuilder operation = new CommandLineBuilder();
-    operation.add(appConf.getComponentOpt(roleGroup, DOCKER_START_COMMAND,
-        "/bin/bash"));
-
+    operation.add(component.getLaunchCommand());
     operation.add("> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
         + OUT_FILE + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/"
         + ERR_FILE);
-
     launcher.addCommand(operation.build());
 
-    // Additional files to localize
-    String appResourcesString = instanceDefinition.getAppConfOperations()
-        .getGlobalOptions().getOption(APP_RESOURCES, null);
-    log.info("Configuration value for extra resources to localize: {}", appResourcesString);
-    if (null != appResourcesString) {
-      try (Scanner scanner = new Scanner(appResourcesString).useDelimiter(",")) {
-        while (scanner.hasNext()) {
-          String resource = scanner.next();
-          Path resourcePath = new Path(resource);
-          LocalResource extraResource = fileSystem.createAmResource(
-              fileSystem.getFileSystem().resolvePath(resourcePath),
-              LocalResourceType.FILE);
-          String destination = APP_RESOURCES_DIR + "/" + resourcePath.getName();
-          log.info("Localizing {} to {}", resourcePath, destination);
-          // TODO Can we try harder to avoid collisions?
-          launcher.addLocalResource(destination, extraResource);
-        }
-      }
-    }
+    // publish exports
+    // TODO move this to app level, no need to do this for every container launch
+    providerUtils
+        .substituteConfigs(application.getQuicklinks(), tokensForSubstitution);
+    PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS,
+        application.getQuicklinks().entrySet());
+    amState.getPublishedSliderConfigurations().put(QUICK_LINKS, pubconf);
   }
 
-  @Override
-  public void initializeApplicationConfiguration(
-      AggregateConf instanceDefinition, SliderFileSystem fileSystem,
-      String roleGroup)
-      throws IOException, SliderException {
-        this.fileSystem = fileSystem;
-  }
-
-  @Override
-  public void applyInitialRegistryDefinitions(URL amWebURI,
-      ServiceRecord serviceRecord)
-      throws IOException {
-    super.applyInitialRegistryDefinitions(amWebURI, serviceRecord);
-
-    // identify client component
-    String clientName = null;
-    ConfTreeOperations appConf = getAmState().getAppConfSnapshot();
-    for (String component : appConf.getComponentNames()) {
-      if (COMPONENT_TYPE_CLIENT.equals(appConf.getComponentOpt(component,
-          COMPONENT_TYPE_KEY, null))) {
-        clientName = component;
-        break;
-      }
-    }
-    if (clientName == null) {
-      log.info("No client component specified, not publishing client configs");
-      return;
-    }
-
-    // register AM-generated client configs
-    // appConf should already be resolved!
-    MapOperations clientOperations = appConf.getComponent(clientName);
-    if (!clientOperations.getOptionBool(AM_CONFIG_GENERATION, false)) {
-      log.info("AM config generation is false, not publishing client configs");
-      return;
-    }
-
-    // build and localize configuration files
-    Map<String, Map<String, String>> configurations =
-        providerUtils.buildConfigurations(appConf, getAmState()
-            .getInternalsSnapshot(), null, getClusterName(), clientName,
-            clientName, getAmState());
-
-    for (Map.Entry<String, Map<String, String>>  entry : configurations.entrySet()) {
-      String configFileDN = entry.getKey();
-      String configFileName = appConf.getComponentOpt(clientName,
-          OptionKeys.CONF_FILE_PREFIX + configFileDN + OptionKeys
-              .NAME_SUFFIX, null);
-      String configFileType = appConf.getComponentOpt(clientName,
-          OptionKeys.CONF_FILE_PREFIX + configFileDN + OptionKeys
-              .TYPE_SUFFIX, null);
-      if (configFileName == null || configFileType == null) {
-        continue;
-      }
-      ConfigFormat configFormat = ConfigFormat.resolve(configFileType);
-
-      Map<String, String> config = entry.getValue();
-      ConfigUtils.prepConfigForTemplateOutputter(configFormat, config,
-          fileSystem, getClusterName(),
-          new File(configFileName).getName());
-      providerUtils.publishApplicationInstanceData(configFileDN, configFileDN,
-          config.entrySet(), getAmState());
-    }
-  }
-
-  @Override
-  public void notifyContainerCompleted(ContainerId containerId) {
-    if (containerId != null) {
-      String containerIdStr = containerId.toString();
-      log.info("Removing container exports for {}", containerIdStr);
-      for (Set<ExportEntry> exportEntries : exportMap.values()) {
-        for (Iterator<ExportEntry> iter = exportEntries.iterator();
-            iter.hasNext();) {
-          ExportEntry entry = iter.next();
-          if (containerIdStr.equals(entry.getContainerId())) {
-            iter.remove();
-          }
-        }
-      }
-    }
-  }
-
-  @Override
   public boolean processContainerStatus(ContainerId containerId,
       ContainerStatus status) {
     log.debug("Handling container status: {}", status);
@@ -304,144 +147,24 @@ public class DockerProviderService extends AbstractProviderService implements
         SliderUtils.isUnset(status.getHost())) {
       return true;
     }
-    RoleInstance instance = getAmState().getOwnedContainer(containerId);
+    RoleInstance instance = amState.getOwnedContainer(containerId);
     if (instance == null) {
       // container is completed?
       return false;
     }
 
-    String roleName = instance.role;
-    String roleGroup = instance.group;
-    String containerIdStr = containerId.toString();
-
-    providerUtils.updateServiceRecord(getAmState(), yarnRegistry,
-        containerIdStr, roleName, status.getIPs(), status.getHost());
-
-    publishExportGroups(containerIdStr, roleName, roleGroup,
-        status.getHost(), status.getIPs());
-    return false;
-  }
-
-  /**
-   * This method looks for configuration properties of the form
-   * export.key,value and publishes the key,value pair. Standard tokens are
-   * substituted into the value, and COMPONENTNAME_HOST and THIS_HOST tokens
-   * are substituted with the actual hostnames of the containers.
-   */
-  protected void publishExportGroups(String containerId,
-      String roleName, String roleGroup, String thisHost, List<String> ips) {
-    ConfTreeOperations appConf = getAmState().getAppConfSnapshot();
-    ConfTreeOperations internalsConf = getAmState().getInternalsSnapshot();
-
-    Map<String, String> exports = providerUtils.getExports(
-        getAmState().getAppConfSnapshot(), roleGroup);
-
-    // publish export groups if any
-    Map<String, String> standardTokens = providerUtils.getStandardTokenMap(
-        appConf, internalsConf, roleName, roleGroup, containerId,
-        getClusterName());
-    Map<String, String> replaceTokens = providerUtils.filterSiteOptions(
-            appConf.getComponent(roleGroup).options, standardTokens);
-    replaceTokens.putAll(standardTokens);
-
-    String roleNameKey = providerUtils.getNameKey(roleName, roleGroup,
-        appConf);
-    String roleNameIPKey = null;
-    if (roleNameKey != null) {
-      replaceTokens.put(String.format(HOST_KEY_FORMAT, roleNameKey), thisHost);
-      roleNameIPKey = Pattern.quote(String.format(IP_KEY_FORMAT, roleNameKey));
-    } else {
-      // should not happen, but log if it does
-      log.info("Not replacing HOST or IP tokens because key was null for {}",
-          roleName);
-    }
-    String roleGroupKey = providerUtils.getGroupKey(roleGroup, appConf);
-    String roleGroupIPKey = null;
-    if (roleGroupKey != null) {
-      if (roleNameKey == null || !roleGroupKey.equals(roleNameKey)) {
-        replaceTokens.put(String.format(HOST_KEY_FORMAT, roleGroupKey),
-            thisHost);
-        roleGroupIPKey = Pattern.quote(String.format(IP_KEY_FORMAT,
-            roleGroupKey));
-      }
+    providerUtils.updateServiceRecord(amState, yarnRegistry,
+        containerId.toString(), instance.role, status.getIPs(), status.getHost());
+    // TODO publish ip and host
+    org.apache.slider.api.resource.Container container =
+        instance.providerRole.component.getContainer(containerId.toString());
+    if (container != null) {
+      container.setIp(StringUtils.join(",", status.getIPs()));
+      container.setHostname(status.getHost());
+      container.setState(ContainerState.READY);
     } else {
-      // should not happen, but log if it does
-      log.info("Not replacing HOST or IP tokens because key was null for {}",
-          roleGroup);
-    }
-    replaceTokens.put("${THIS_HOST}", thisHost);
-
-    for (Entry<String, String> export : exports.entrySet()) {
-      String value = export.getValue();
-      // replace host names and site properties
-      for (Map.Entry<String, String>  entry : replaceTokens.entrySet()) {
-        String token = entry.getKey();
-        if (value.contains(token)) {
-          value = value.replaceAll(Pattern.quote(token), entry.getValue());
-        }
-      }
-      Set<String> values = new HashSet<>();
-      for (String ip : ips) {
-        values.add(substituteIP(roleNameIPKey, roleGroupIPKey, ip, value));
-      }
-      for (String exportValue : values) {
-        if (exportValue.contains(VARIABLE_INDICATOR)) {
-          // not all variables have been substituted, so do not export
-          continue;
-        }
-        ExportEntry entry = new ExportEntry();
-        entry.setContainerId(containerId);
-        entry.setLevel(APPLICATION_TAG);
-        entry.setValue(exportValue);
-        entry.setUpdatedTime(new Date().toString());
-        Set<ExportEntry> exportEntries = getExportEntries(export.getKey());
-        exportEntries.add(entry);
-        log.info("Preparing to publish for {}. Key {} and Value {}",
-            roleName, export.getKey(), entry);
-      }
-    }
-    if (!exportMap.isEmpty()) {
-      providerUtils.publishExportGroup(exportMap, getAmState(), EXPORT_GROUP);
-    }
-  }
-
-  protected String substituteIP(String roleNameIPKey, String roleGroupIPKey,
-      String ip, String value) {
-    if (roleNameIPKey != null) {
-      value = value.replaceAll(roleNameIPKey, ip);
-    }
-    if (roleGroupIPKey != null) {
-      value = value.replaceAll(roleGroupIPKey, ip);
-    }
-    return value;
-  }
-
-  protected Set<ExportEntry> getExportEntries(String key) {
-    if (!this.exportMap.containsKey(key)) {
-      synchronized (this.exportMap) {
-        if (!this.exportMap.containsKey(key)) {
-          this.exportMap.put(key, Collections.newSetFromMap(
-              new ConcurrentHashMap<>()));
-        }
-      }
-    }
-
-    return this.exportMap.get(key);
-  }
-
-  @Override
-  public Map<String, MonitorDetail> buildMonitorDetails(ClusterDescription clusterDesc) {
-    Map<String, MonitorDetail> details = super.buildMonitorDetails(clusterDesc);
-    buildRoleHostDetails(details);
-    return details;
-  }
-
-  private void buildRoleHostDetails(Map<String, MonitorDetail> details) {
-    for (Map.Entry<String, Map<String, ClusterNode>> entry :
-        getAmState().getRoleClusterNodeMapping().entrySet()) {
-      details.put(entry.getKey() + " Host(s)/Container(s)",
-          new MonitorDetail(providerUtils.getHostsList(
-              entry.getValue().values(), false).toString(), false));
+      log.warn(containerId + " not found in Application!");
     }
+    return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
deleted file mode 100644
index b58d3aa..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMClientProvider.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.providers.slideram;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.slider.api.InternalKeys;
-import org.apache.slider.api.ResourceKeys;
-import org.apache.slider.api.RoleKeys;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.SliderXmlConfKeys;
-import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.exceptions.BadClusterStateException;
-import org.apache.slider.core.exceptions.BadConfigException;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.launch.AbstractLauncher;
-import org.apache.slider.core.launch.JavaCommandLineBuilder;
-import org.apache.slider.providers.AbstractClientProvider;
-import org.apache.slider.providers.PlacementPolicy;
-import org.apache.slider.providers.ProviderRole;
-import org.apache.slider.providers.ProviderUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.slider.api.ResourceKeys.COMPONENT_INSTANCES;
-
-/**
- * handles the setup of the Slider AM.
- * This keeps aspects of role, cluster validation and Clusterspec setup
- * out of the core slider client
- */
-public class SliderAMClientProvider extends AbstractClientProvider
-    implements SliderKeys {
-
-
-  protected static final Logger log =
-    LoggerFactory.getLogger(SliderAMClientProvider.class);
-  protected static final String NAME = "SliderAM";
-  public static final String INSTANCE_RESOURCE_BASE = PROVIDER_RESOURCE_BASE_ROOT +
-                                                       "slideram/instance/";
-  public static final String INTERNAL_JSON =
-    INSTANCE_RESOURCE_BASE + "internal.json";
-  public static final String APPCONF_JSON =
-    INSTANCE_RESOURCE_BASE + "appconf.json";
-  public static final String RESOURCES_JSON =
-    INSTANCE_RESOURCE_BASE + "resources.json";
-
-  public SliderAMClientProvider(Configuration conf) {
-    super(conf);
-  }
-
-  /**
-   * List of roles
-   */
-  public static final List<ProviderRole> ROLES =
-    new ArrayList<ProviderRole>();
-
-  public static final int KEY_AM = ROLE_AM_PRIORITY_INDEX;
-
-  public static final ProviderRole APPMASTER =
-      new ProviderRole(COMPONENT_AM, KEY_AM,
-          PlacementPolicy.EXCLUDE_FROM_FLEXING,
-          ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD, 
-          0, "");
-
-  /**
-   * Initialize role list
-   */
-  static {
-    ROLES.add(APPMASTER);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  @Override
-  public List<ProviderRole> getRoles() {
-    return ROLES;
-  }
-
-
-  @Override //Client
-  public void preflightValidateClusterConfiguration(SliderFileSystem sliderFileSystem,
-                                                    String clustername,
-                                                    Configuration configuration,
-                                                    AggregateConf instanceDefinition,
-                                                    Path clusterDirPath,
-                                                    Path generatedConfDirPath,
-                                                    boolean secure)
-      throws SliderException, IOException {
-
-    super.preflightValidateClusterConfiguration(sliderFileSystem, clustername, configuration, instanceDefinition, clusterDirPath, generatedConfDirPath, secure);
-    //add a check for the directory being writeable by the current user
-    String
-      dataPath = instanceDefinition.getInternalOperations()
-                                   .getGlobalOptions()
-                                   .getMandatoryOption(
-                                     InternalKeys.INTERNAL_DATA_DIR_PATH);
-
-    Path path = new Path(dataPath);
-    sliderFileSystem.verifyDirectoryWriteAccess(path);
-    Path historyPath = new Path(clusterDirPath, SliderKeys.HISTORY_DIR_NAME);
-    sliderFileSystem.verifyDirectoryWriteAccess(historyPath);
-  }
-
-  /**
-   * Verify that an instance definition is considered valid by the provider
-   * @param instanceDefinition instance definition
-   * @throws SliderException if the configuration is not valid
-   */
-  public void validateInstanceDefinition(AggregateConf instanceDefinition, SliderFileSystem fs) throws
-      SliderException {
-
-    super.validateInstanceDefinition(instanceDefinition, fs);
-    
-    // make sure there is no negative entry in the instance count
-    Map<String, Map<String, String>> instanceMap =
-        instanceDefinition.getResources().components;
-    for (Map.Entry<String, Map<String, String>> entry : instanceMap.entrySet()) {
-      MapOperations mapOperations = new MapOperations(entry);
-      int instances = mapOperations.getOptionInt(COMPONENT_INSTANCES, 0);
-      if (instances < 0) {
-        throw new BadClusterStateException(
-            "Component %s has negative instance count: %d",
-            mapOperations.name,
-            instances);
-      }
-    }
-  }
-  
-  /**
-   * The Slider AM sets up all the dependency JARs above slider.jar itself
-   * {@inheritDoc}
-   */
-  public void prepareAMAndConfigForLaunch(SliderFileSystem fileSystem,
-      Configuration serviceConf,
-      AbstractLauncher launcher,
-      AggregateConf instanceDescription,
-      Path snapshotConfDirPath,
-      Path generatedConfDirPath,
-      Configuration clientConfExtras,
-      String libdir,
-      Path tempPath, boolean miniClusterTestRun)
-    throws IOException, SliderException {
-
-    Map<String, LocalResource> providerResources = new HashMap<>();
-
-    ProviderUtils.addProviderJar(providerResources,
-        this,
-        SLIDER_JAR,
-        fileSystem,
-        tempPath,
-        libdir,
-        miniClusterTestRun);
-
-    log.info("Loading all dependencies for AM.");
-    // If slider.tar.gz is available in hdfs use it, else upload all jars
-    Path dependencyLibTarGzip = fileSystem.getDependencyTarGzip();
-    if (fileSystem.isFile(dependencyLibTarGzip)) {
-      SliderUtils.putAmTarGzipAndUpdate(providerResources, fileSystem);
-    } else {
-      for (String libDirProp : SliderUtils.getLibDirs()) {
-        ProviderUtils.addAllDependencyJars(providerResources,
-                                           fileSystem,
-                                           tempPath,
-                                           libdir,
-                                           libDirProp);
-
-      }
-    }
-    addKeytabResourceIfNecessary(fileSystem,
-                                 instanceDescription,
-                                 providerResources);
-
-    launcher.addLocalResources(providerResources);
-
-    //also pick up all env variables from a map
-    launcher.copyEnvVars(
-      instanceDescription.getInternalOperations().getOrAddComponent(
-        SliderKeys.COMPONENT_AM));
-  }
-
-  /**
-   * If the cluster is secure, and an HDFS installed keytab is available for AM
-   * authentication, add this keytab as a local resource for the AM launch.
-   *
-   * @param fileSystem
-   * @param instanceDescription
-   * @param providerResources
-   * @throws IOException
-   * @throws BadConfigException if there's no keytab and it is explicitly required.
-   */
-  protected void addKeytabResourceIfNecessary(SliderFileSystem fileSystem,
-                                              AggregateConf instanceDescription,
-                                              Map<String, LocalResource> providerResources)
-    throws IOException, BadConfigException {
-    if (UserGroupInformation.isSecurityEnabled()) {
-      String keytabPathOnHost = instanceDescription.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM).get(
-              SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-      if (SliderUtils.isUnset(keytabPathOnHost)) {
-        String amKeytabName = instanceDescription.getAppConfOperations()
-            .getComponent(SliderKeys.COMPONENT_AM).get(
-                SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-        String keytabDir = instanceDescription.getAppConfOperations()
-            .getComponent(SliderKeys.COMPONENT_AM).get(
-                SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
-        Path keytabPath = fileSystem.buildKeytabPath(keytabDir, amKeytabName,
-                                                     instanceDescription.getName());
-        if (fileSystem.getFileSystem().exists(keytabPath)) {
-          LocalResource keytabRes = fileSystem.createAmResource(keytabPath,
-                                                  LocalResourceType.FILE);
-
-          providerResources.put(SliderKeys.KEYTAB_DIR + "/" +
-                                 amKeytabName, keytabRes);
-        } else {
-          log.warn("No keytab file was found at {}.", keytabPath);
-          if (getConf().getBoolean(KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) {
-            throw new BadConfigException("No keytab file was found at %s.", keytabPath);
-
-          } else {
-            log.warn("The AM will be "
-              + "started without a kerberos authenticated identity. "
-              + "The application is therefore not guaranteed to remain "
-              + "operational beyond 24 hours.");
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Update the AM resource with any local needs
-   * @param capability capability to update
-   */
-  public void prepareAMResourceRequirements(MapOperations sliderAM,
-                                            Resource capability) {
-    capability.setMemory(sliderAM.getOptionInt(
-      ResourceKeys.YARN_MEMORY,
-      capability.getMemory()));
-    capability.setVirtualCores(
-        sliderAM.getOptionInt(ResourceKeys.YARN_CORES, capability.getVirtualCores()));
-  }
-  
-  /**
-   * Extract any JVM options from the cluster specification and
-   * add them to the command line
-   */
-  public void addJVMOptions(AggregateConf aggregateConf,
-                            JavaCommandLineBuilder cmdLine)
-      throws BadConfigException {
-
-    MapOperations sliderAM =
-        aggregateConf.getAppConfOperations().getMandatoryComponent(
-        SliderKeys.COMPONENT_AM);
-    cmdLine.forceIPv4().headless();
-    String heap = sliderAM.getOption(RoleKeys.JVM_HEAP,
-                                   DEFAULT_JVM_HEAP);
-    cmdLine.setJVMHeap(heap);
-    String jvmopts = sliderAM.getOption(RoleKeys.JVM_OPTS, "");
-    if (SliderUtils.isSet(jvmopts)) {
-      cmdLine.add(jvmopts);
-    }
-  }
-
-
-  @Override
-  public void prepareInstanceConfiguration(AggregateConf aggregateConf)
-      throws SliderException, IOException {
-    mergeTemplates(aggregateConf,
-        INTERNAL_JSON, RESOURCES_JSON, APPCONF_JSON
-                  );
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
deleted file mode 100644
index c021b80..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.providers.slideram;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
-import org.apache.hadoop.registry.client.types.ServiceRecord;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.tools.ConfigHelper;
-import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.launch.ContainerLauncher;
-import org.apache.slider.core.registry.docstore.PublishedConfiguration;
-import org.apache.slider.core.registry.info.CustomRegistryConstants;
-import org.apache.slider.providers.AbstractProviderService;
-import org.apache.slider.providers.ProviderCore;
-import org.apache.slider.providers.ProviderRole;
-import org.apache.slider.providers.agent.AgentKeys;
-import org.apache.slider.server.appmaster.PublishedArtifacts;
-import org.apache.slider.server.appmaster.web.rest.RestPaths;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
-
-/**
- * Exists just to move some functionality out of AppMaster into a peer class
- * of the actual service provider doing the real work
- */
-public class SliderAMProviderService extends AbstractProviderService implements
-    ProviderCore,
-    AgentKeys,
-    SliderKeys {
-
-  public SliderAMProviderService() {
-    super("SliderAMProviderService");
-  }
-
-  @Override
-  public String getHumanName() {
-    return "Slider Application";
-  }
-  
-  @Override
-  public Configuration loadProviderConfigurationInformation(File confDir) throws
-      BadCommandArgumentsException,
-      IOException {
-    return null;
-  }
-
-  @Override
-  public void buildContainerLaunchContext(ContainerLauncher containerLauncher,
-      AggregateConf instanceDefinition,
-      Container container,
-      ProviderRole role,
-      SliderFileSystem sliderFileSystem,
-      Path generatedConfPath,
-      MapOperations resourceComponent,
-      MapOperations appComponent,
-      Path containerTmpDirPath) throws IOException, SliderException {
-  }
-
-  @Override
-  public List<ProviderRole> getRoles() {
-    return new ArrayList<>(0);
-  }
-
-  @Override
-  public void validateInstanceDefinition(AggregateConf instanceDefinition) throws
-      SliderException {
-
-  }
-
-  @Override
-  public void applyInitialRegistryDefinitions(URL amWebURI,
-      ServiceRecord serviceRecord)
-      throws IOException {
-    super.applyInitialRegistryDefinitions(amWebURI,
-        serviceRecord);
-    // now publish site.xml files
-    YarnConfiguration defaultYarnConfig = new YarnConfiguration();
-    amState.getPublishedSliderConfigurations().put(
-        PublishedArtifacts.COMPLETE_CONFIG,
-        new PublishedConfiguration(
-            "Complete slider application settings",
-            getConfig(), getConfig()));
-    amState.getPublishedSliderConfigurations().put(
-        PublishedArtifacts.YARN_SITE_CONFIG,
-        new PublishedConfiguration(
-            "YARN site settings",
-            ConfigHelper.loadFromResource("yarn-site.xml"),
-            defaultYarnConfig) );
-
-    amState.getPublishedSliderConfigurations().put(
-        PublishedArtifacts.CORE_SITE_CONFIG,
-        new PublishedConfiguration(
-            "Core site settings",
-            ConfigHelper.loadFromResource("core-site.xml"),
-            defaultYarnConfig) );
-    amState.getPublishedSliderConfigurations().put(
-        PublishedArtifacts.HDFS_SITE_CONFIG,
-        new PublishedConfiguration(
-            "HDFS site settings",
-            ConfigHelper.loadFromResource("hdfs-site.xml"),
-            new HdfsConfiguration(true)) );
-
-
-    try {
-
-      URL managementAPI = new URL(amWebURI, RELATIVE_PATH_MANAGEMENT);
-      URL registryREST = new URL(amWebURI, RELATIVE_PATH_REGISTRY);
-
-      URL publisherURL = new URL(amWebURI, RELATIVE_PATH_PUBLISHER);
-
-      // Set the configurations URL.
-
-      String configurationsURL = SliderUtils.appendToURL(
-          publisherURL.toExternalForm(), RestPaths.SLIDER_CONFIGSET);
-      String exportsURL = SliderUtils.appendToURL(
-          publisherURL.toExternalForm(), RestPaths.SLIDER_EXPORTS);
-
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.webEndpoint(
-              CustomRegistryConstants.WEB_UI, amWebURI.toURI()));
-      
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.webEndpoint(
-              CustomRegistryConstants.AM_REST_BASE, amWebURI.toURI()));
-      
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.restEndpoint(
-              CustomRegistryConstants.MANAGEMENT_REST_API,
-              managementAPI.toURI()));
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.restEndpoint(
-              CustomRegistryConstants.PUBLISHER_REST_API,
-              publisherURL.toURI()));
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.restEndpoint(
-              CustomRegistryConstants.REGISTRY_REST_API,
-              registryREST.toURI()));
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.restEndpoint(
-              CustomRegistryConstants.PUBLISHER_CONFIGURATIONS_API,
-              new URI(configurationsURL)));
-      serviceRecord.addExternalEndpoint(
-          RegistryTypeUtils.restEndpoint(
-              CustomRegistryConstants.PUBLISHER_EXPORTS_API,
-              new URI(exportsURL)));
-
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
index 3cfe167..70eab71 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java
@@ -18,16 +18,12 @@
 
 package org.apache.slider.server.appmaster;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.slider.common.SliderKeys;
+import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.launch.ContainerLauncher;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderService;
@@ -37,8 +33,8 @@ import org.apache.slider.server.appmaster.actions.QueueAccess;
 import org.apache.slider.server.appmaster.state.ContainerAssignment;
 import org.apache.slider.server.appmaster.state.RoleInstance;
 import org.apache.slider.server.appmaster.state.RoleStatus;
-import org.apache.slider.server.services.workflow.WorkflowExecutorService;
 import org.apache.slider.server.services.workflow.ServiceThreadFactory;
+import org.apache.slider.server.services.workflow.WorkflowExecutorService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -73,16 +69,6 @@ public class RoleLaunchService
    */
   private final SliderFileSystem fs;
 
-  /**
-   * Path in the launch filesystem that refers to a configuration directory
-   * -the interpretation of it is left to the Provider
-   */
-  private final Path generatedConfDirPath;
-  /**
-   * Path in the launch filesystem that refers to a temp directory
-   * which will be cleaned up at (some) time in the future
-   */
-  private final Path launcherTmpDirPath;
 
   private Map<String, String> envVars;
 
@@ -91,21 +77,13 @@ public class RoleLaunchService
    * @param queueAccess
    * @param provider the provider
    * @param fs filesystem
-   * @param generatedConfDirPath path in the FS for the generated dir
    * @param envVars environment variables
-   * @param launcherTmpDirPath path for a temporary data in the launch process
    */
-  public RoleLaunchService(QueueAccess queueAccess,
-      ProviderService provider,
-      SliderFileSystem fs,
-      Path generatedConfDirPath,
-      Map<String, String> envVars,
-      Path launcherTmpDirPath) {
+  public RoleLaunchService(QueueAccess queueAccess, ProviderService provider,
+      SliderFileSystem fs, Map<String, String> envVars) {
     super(ROLE_LAUNCH_SERVICE);
     this.actionQueue = queueAccess;
     this.fs = fs;
-    this.generatedConfDirPath = generatedConfDirPath;
-    this.launcherTmpDirPath = launcherTmpDirPath;
     this.provider = provider;
     this.envVars = envVars;
   }
@@ -120,23 +98,13 @@ public class RoleLaunchService
   /**
    * Start an asychronous launch operation
    * @param assignment container assignment
-   * @param clusterSpec cluster spec to use for template
    * @param credentials credentials to use
    */
   public void launchRole(ContainerAssignment assignment,
-      AggregateConf clusterSpec,
-      Credentials credentials) {
-    RoleStatus role = assignment.role;
-    String roleName = role.getName();
-    String roleGroup = role.getGroup();
-    // prelaunch safety check
-    Preconditions.checkArgument(provider.isSupportedRole(roleName));
+      Application application, Credentials credentials) {
     RoleLaunchService.RoleLauncher launcher =
-      new RoleLaunchService.RoleLauncher(assignment,
-         clusterSpec,
-         clusterSpec.getResourceOperations().getOrAddComponent(roleGroup),
-         clusterSpec.getAppConfOperations().getOrAddComponent(roleGroup),
-         credentials);
+        new RoleLaunchService.RoleLauncher(assignment, application,
+            credentials);
     execute(launcher);
   }
 
@@ -148,35 +116,21 @@ public class RoleLaunchService
     private final ContainerAssignment assignment;
     // Allocated container
     public final Container container;
-    private final MapOperations resourceComponent;
-    private final MapOperations appComponent;
-    private final AggregateConf instanceDefinition;
+    public final Application application;
     public final ProviderRole role;
     private final Credentials credentials;
-    private Exception raisedException;
 
     public RoleLauncher(ContainerAssignment assignment,
-        AggregateConf instanceDefinition,
-        MapOperations resourceComponent,
-        MapOperations appComponent,
+        Application application,
         Credentials credentials) {
       this.assignment = assignment;
       this.credentials = credentials;
       this.container = assignment.container;
       RoleStatus roleStatus = assignment.role;
-
-      assert resourceComponent != null;
-      assert appComponent != null;
       ProviderRole providerRole = roleStatus.getProviderRole();
-      assert providerRole != null;
       this.role = providerRole;
-      this.resourceComponent = resourceComponent;
-      this.appComponent = appComponent;
-      this.instanceDefinition = instanceDefinition;
-    }
+      this.application = application;
 
-    public Exception getRaisedException() {
-      return raisedException;
     }
 
     @Override
@@ -196,62 +150,46 @@ public class RoleLaunchService
         containerLauncher.setupUGI();
         containerLauncher.putEnv(envVars);
 
-        log.debug("Launching container {} into role {}",
-                  container.getId(),
-                  role.name);
+        log.info("Launching container {} into RoleName = {}, RoleGroup = {}",
+            container.getId(), role.name, role.group);
 
-        //now build up the configuration data
-        Path containerTmpDirPath =
-          new Path(launcherTmpDirPath, container.getId().toString());
-        provider.buildContainerLaunchContext(containerLauncher,
-            instanceDefinition,
-            container,
-            role,
-            fs,
-            generatedConfDirPath,
-            resourceComponent,
-            appComponent,
-            containerTmpDirPath);
+        provider.buildContainerLaunchContext(containerLauncher, application,
+            container, role, fs);
 
         RoleInstance instance = new RoleInstance(container);
         String[] envDescription = containerLauncher.dumpEnvToString();
 
         String commandsAsString = containerLauncher.getCommandsAsString();
-        log.info("Starting container with command: {}",
-                 commandsAsString);
+        log.info("Starting container with command: {}", commandsAsString);
 
+        instance.providerRole = role;
         instance.command = commandsAsString;
         instance.role = role.name;
         instance.group = role.group;
         instance.roleId = role.id;
-        instance.appVersion = instanceDefinition.getAppConfOperations()
-            .getGlobalOptions().get(SliderKeys.APP_VERSION);
         instance.environment = envDescription;
-        int delay = appComponent.getOptionInt(
-            AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
-        int maxDelay =
-            getConfig().getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
-                               YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
+        long delay = role.component.getConfiguration()
+            .getPropertyLong(AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0);
+        long maxDelay = getConfig()
+            .getLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
+                YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS);
         if (delay > maxDelay/1000) {
           log.warn("Container launch delay of {} exceeds the maximum allowed of"
                    + " {} seconds.  Delay will not be utilized.",
                    delay, maxDelay/1000);
           delay = 0;
         }
-        log.info("Container launch delay for {} set to {} seconds",
-                 role.name, delay);
-        actionQueue.schedule(new ActionStartContainer("starting " + role.name,
-                                                      container,
-                                                      containerLauncher.completeContainerLaunch(),
-                                                      instance,
-                                                      delay,
-                                                      TimeUnit.SECONDS));
+        log.info("Container launch delay for {} set to {} seconds", role.name,
+            delay);
+        actionQueue.schedule(
+            new ActionStartContainer("starting " + role.name, container,
+                containerLauncher.completeContainerLaunch(), instance, delay,
+                TimeUnit.SECONDS));
       } catch (Exception e) {
-        log.error("Exception thrown while trying to start {}: {}",
-            role.name, e, e);
-        raisedException = e;
+        log.error("Exception thrown while trying to start " + role.name
+            + " container = " + container.getId() + " on host " + container
+            .getNodeId(), e);
       }
     }
-
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: YARN-6419. Support to launch new native-service from new YARN UI. Contributed by Akhil PB.

Posted by ji...@apache.org.
YARN-6419. Support to launch new native-service from new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81cb5326
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81cb5326
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81cb5326

Branch: refs/heads/yarn-native-services
Commit: 81cb53262a30b458396e818e4dad39d12a08f5f7
Parents: 2ace79d
Author: Sunil G <su...@apache.org>
Authored: Wed May 3 12:30:55 2017 +0530
Committer: Jian He <ji...@apache.org>
Committed: Tue Jul 25 10:49:25 2017 -0700

----------------------------------------------------------------------
 .../main/webapp/app/adapters/restabstract.js    |  50 ++++
 .../main/webapp/app/adapters/yarn-servicedef.js |  31 +++
 .../webapp/app/components/breadcrumb-bar.js     |   1 +
 .../webapp/app/components/deploy-service.js     | 167 +++++++++++
 .../app/components/fileconfig-viewer-dialog.js  |  36 +++
 .../main/webapp/app/components/info-tooltip.js  |  44 +++
 .../app/components/service-component-table.js   |  56 ++++
 .../app/components/service-config-table.js      |  89 ++++++
 .../app/components/service-fileconfig-table.js  | 112 ++++++++
 .../main/webapp/app/components/upload-config.js |  54 ++++
 .../app/controllers/yarn-deploy-service.js      |  69 +++++
 .../main/webapp/app/models/yarn-servicedef.js   | 278 +++++++++++++++++++
 .../src/main/webapp/app/router.js               |   1 +
 .../webapp/app/routes/yarn-deploy-service.js    |  27 ++
 .../src/main/webapp/app/services/hosts.js       |   4 +
 .../src/main/webapp/app/styles/app.css          | 164 +++++++++++
 .../main/webapp/app/templates/application.hbs   |   2 +
 .../app/templates/components/breadcrumb-bar.hbs |   4 +-
 .../app/templates/components/deploy-service.hbs | 157 +++++++++++
 .../components/fileconfig-viewer-dialog.hbs     |  53 ++++
 .../app/templates/components/info-tooltip.hbs   |  20 ++
 .../components/service-component-table.hbs      | 113 ++++++++
 .../components/service-config-table.hbs         | 130 +++++++++
 .../components/service-fileconfig-table.hbs     | 152 ++++++++++
 .../app/templates/components/upload-config.hbs  |  44 +++
 .../app/templates/yarn-deploy-service.hbs       |  33 +++
 .../main/webapp/app/templates/yarn-services.hbs |   4 +
 .../src/main/webapp/app/utils/info-seeder.js    |  26 ++
 .../src/main/webapp/config/configs.env          |   7 +
 .../src/main/webapp/config/default-config.js    |   7 +-
 .../components/deploy-service-test.js           |  43 +++
 .../components/fileconfig-viewer-dialog-test.js |  43 +++
 .../integration/components/info-tooltip-test.js |  43 +++
 .../components/service-component-table-test.js  |  43 +++
 .../components/service-config-table-test.js     |  43 +++
 .../components/service-fileconfig-table-test.js |  43 +++
 .../components/upload-config-test.js            |  43 +++
 .../tests/unit/adapters/yarn-servicedef-test.js |  30 ++
 .../controllers/yarn-deploy-service-test.js     |  30 ++
 .../tests/unit/models/yarn-servicedef-test.js   |  30 ++
 .../unit/routes/yarn-deploy-service-test.js     |  29 ++
 41 files changed, 2352 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js
new file mode 100644
index 0000000..df409d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+import Ember from 'ember';
+
+export default DS.RESTAdapter.extend({
+  address: null, //Must be set by inheriting classes
+  restNameSpace: null, //Must be set by inheriting classes
+  serverName: null, //Must be set by inheriting classes
+
+  headers: {
+    Accept: 'application/json'
+  },
+
+  host: Ember.computed("address", function() {
+    var address = this.get("address");
+    return this.get(`hosts.${address}`);
+  }),
+
+  namespace: Ember.computed("restNameSpace", function() {
+    var nameSpace = this.get("restNameSpace");
+    return this.get(`env.app.namespaces.${nameSpace}`);
+  }),
+
+  ajax(url, method, options) {
+    options = options || {};
+    options.crossDomain = true;
+    options.xhrFields = {
+      withCredentials: true
+    };
+    options.targetServer = this.get('serverName');
+    return this._super(url, method, options);
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
new file mode 100644
index 0000000..c362f5e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import RESTAbstractAdapter from './restabstract';
+
+export default RESTAbstractAdapter.extend({
+  address: "dashWebAddress",
+  restNameSpace: "dashService",
+  serverName: "DASH",
+
+  deployService(request) {
+    var url = this.buildURL();
+    return this.ajax(url, "POST", {data: request});
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js
index 44edb8e..b8d974a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js
@@ -21,6 +21,7 @@ import Ember from 'ember';
 export default Ember.Component.extend({
 
   breadcrumbs: null,
+  hideRefresh: false,
 
   actions:{
     refresh: function () {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
new file mode 100644
index 0000000..90e10e5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  viewType: 'standard',
+  savedStandardTemplates: null,
+  savedJsonTemplates: null,
+  savedTemplateName: '',
+  serviceDef: null,
+  customServiceDef: '',
+  serviceResp: null,
+  isLoading: false,
+
+  actions: {
+    showSaveTemplateModal() {
+      this.$('#saveListModal').modal('show');
+    },
+
+    deployService() {
+      this.set('serviceResp', null);
+      if (this.get('isStandardViewType')) {
+        this.sendAction("deployServiceDef", this.get('serviceDef'));
+      } else {
+        try {
+          var parsed = JSON.parse(this.get('customServiceDef'));
+          this.sendAction("deployServiceJson", parsed);
+        } catch (err) {
+          this.set('serviceResp', {type: 'error', message: 'Invalid JSON: ' + err.message});
+          throw err;
+        }
+      }
+    },
+
+    updateViewType(type) {
+      this.set('viewType', type);
+    },
+
+    addToSavedList() {
+      this.unselectAllSavedList();
+      if (this.get('isStandardViewType')) {
+        this.get('savedStandardTemplates').addObject({
+          name: this.get('savedTemplateName'),
+          defId: this.get('serviceDef.id'),
+          active: true
+        });
+        this.set('serviceDef.isCached', true);
+      } else {
+        this.get('savedJsonTemplates').addObject({
+          name: this.get('savedTemplateName'),
+          custom: this.get('customServiceDef'),
+          active: true
+        });
+      }
+      this.$('#saveListModal').modal('hide');
+      this.set('savedTemplateName', '');
+    },
+
+    updateServiceDef(def) {
+      this.selectActiveListItem(def);
+      if (this.get('isStandardViewType')) {
+        this.set('serviceDef', this.getStore().peekRecord('yarn-servicedef', def.defId));
+      } else {
+        this.set('customServiceDef', def.custom);
+      }
+    },
+
+    clearConfigs() {
+      this.unselectAllSavedList();
+      this.set('serviceResp', null);
+      if (this.get('isStandardViewType')) {
+        var oldDef = this.get('serviceDef');
+        var def = oldDef.createNewServiceDef();
+        this.set('serviceDef', def);
+        if (!oldDef.get('isCached')) {
+          oldDef.deleteRecord();
+        }
+      } else {
+        this.set('customServiceDef', '');
+      }
+    },
+
+    removeFromSavedList(list) {
+      if (list.active) {
+        this.send('clearConfigs');
+      }
+      if (this.get('isStandardViewType')) {
+        this.get('savedStandardTemplates').removeObject(list);
+      } else {
+        this.get('savedJsonTemplates').removeObject(list);
+      }
+    },
+
+    clearServiceResponse() {
+      this.set('serviceResp', null);
+    }
+  },
+
+  didInsertElement() {
+    var self = this;
+    self.$().find('.modal').on('shown.bs.modal', function() {
+      self.$().find('.modal.in').find('input.form-control:first').focus();
+    });
+  },
+
+  selectActiveListItem(item) {
+    this.unselectAllSavedList();
+    Ember.set(item, 'active', true);
+  },
+
+  unselectAllSavedList() {
+    this.get('getSavedList').forEach(function(item) {
+      Ember.set(item, 'active', false);
+    });
+  },
+
+  getSavedList: Ember.computed('viewType', function() {
+    if (this.get('isStandardViewType')) {
+      return this.get('savedStandardTemplates');
+    } else {
+      return this.get('savedJsonTemplates');
+    }
+  }),
+
+  getStore: function() {
+    return this.get('serviceDef.store');
+  },
+
+  isStandardViewType: Ember.computed.equal('viewType', 'standard'),
+
+  isCustomViewType: Ember.computed.equal('viewType', 'custom'),
+
+  isValidTemplateName: Ember.computed.notEmpty('savedTemplateName'),
+
+  isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 'serviceDef.serviceComponents.[]', function () {
+    return this.get('serviceDef').isValidServiceDef();
+  }),
+
+  isValidCustomServiceDef: Ember.computed.notEmpty('customServiceDef'),
+
+  enableSaveOrDeployBtn: Ember.computed('isValidServiceDef', 'isValidCustomServiceDef', 'viewType', 'isLoading', function() {
+    if (this.get('isLoading')) {
+      return false;
+    }
+    if (this.get('isStandardViewType')) {
+      return this.get('isValidServiceDef');
+    } else {
+      return this.get('isValidCustomServiceDef');
+    }
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js
new file mode 100644
index 0000000..d4912768
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  dialogId: "fileconfig_viewer_dialog",
+  title: "File Configuration Properties",
+  props: null,
+  customProps: Ember.computed('props', function() {
+    var custom = [];
+    var props = this.get('props');
+    for (var pro in props) {
+      custom.push({
+        name: pro,
+        value: props[pro]
+      });
+    }
+    return custom;
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js
new file mode 100644
index 0000000..605b611
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+import InfoSeeder from 'yarn-ui/utils/info-seeder';
+
+export default Ember.Component.extend({
+  classNames: ['tooltip', 'info-tooltip'],
+  elementId: 'info_tooltip_wrapper',
+
+  didInsertElement() {
+    var $tooltip = Ember.$('#info_tooltip_wrapper');
+    Ember.$('body').on('mouseenter', '.info-icon', function() {
+      var $elem = Ember.$(this);
+      var info = InfoSeeder[$elem.data('info')];
+      var offset = $elem.offset();
+      $tooltip.show();
+      $tooltip.find("#tooltip_content").text(info);
+      $tooltip.offset({top: offset.top + 20, left: offset.left - 10});
+    }).on('mouseleave', '.info-icon', function() {
+      $tooltip.find("#tooltip_content").text('');
+      $tooltip.hide();
+    });
+  },
+
+  WillDestroyElement() {
+    Ember.$('body').off('hover');
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
new file mode 100644
index 0000000..5a9ae30
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  serviceDef: null,
+  currentComponent: null,
+  duplicateNameError: false,
+
+  actions: {
+    showAddComponentModal() {
+      var newComp = this.get('serviceDef').createNewServiceComponent();
+      this.set('currentComponent', newComp);
+      this.set('duplicateNameError', false);
+      this.$('#addComponentModal').modal('show');
+    },
+
+    addNewComponent() {
+      this.set('duplicateNameError', false);
+      if (this.isCurrentNameDuplicate()) {
+        this.set('duplicateNameError', true);
+        return;
+      }
+      this.get('serviceDef.serviceComponents').addObject(this.get('currentComponent'));
+      this.$('#addComponentModal').modal('hide');
+    },
+
+    removeComponent(component) {
+      this.get('serviceDef.serviceComponents').removeObject(component);
+    }
+  },
+
+  isCurrentNameDuplicate() {
+    var currName = this.get('currentComponent.name');
+    var item = this.get('serviceDef.serviceComponents').findBy('name', currName);
+    return !Ember.isNone(item);
+  },
+
+  isValidCurrentComponent: Ember.computed.and('currentComponent', 'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 'currentComponent.numOfContainers', 'currentComponent.artifactId', 'currentComponent.launchCommand')
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js
new file mode 100644
index 0000000..b0a78dd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
/**
 * Table component that manages property/env/quicklink configurations of a
 * service definition. Drives the add-configuration modal and the JSON
 * upload modal.
 */
export default Ember.Component.extend({
  serviceDef: null,
  currentConfig: null,
  serviceConfigJson: '',

  actions: {
    // Open the add-configuration modal pre-populated with a blank config.
    showNewConfigurationModal() {
      const config = this.get('serviceDef').createNewServiceConfig();
      this.set('currentConfig', config);
      this.$('#addConfigurationModal').modal('show');
      // Default the component selector to the first component, if any exist.
      if (this.get('isNonEmptyComponents') && this.get('currentConfig.componentName') === '') {
        this.set('currentConfig.componentName', this.get('componentNames.firstObject'));
      }
    },

    removeConfiguration(config) {
      this.get('serviceDef.serviceConfigs').removeObject(config);
    },

    // Quicklinks are always service-scoped, so reset scope on type change.
    configTypeChanged(type) {
      this.set('currentConfig.type', type);
      if (type === 'quicklink') {
        this.set('currentConfig.componentName', '');
        this.set('currentConfig.scope', 'service');
      }
    },

    addNewConfiguration() {
      this.get('serviceDef.serviceConfigs').addObject(this.get('currentConfig'));
      this.$('#addConfigurationModal').modal('hide');
    },

    showServiceConfigUploadModal() {
      this.set('serviceConfigJson', '');
      this.$('#service_config_upload_modal').modal('show');
    },

    uploadServiceConfig(json) {
      this.get('serviceDef').convertJsonServiceConfigs(json);
      this.$('#service_config_upload_modal').modal('hide');
    },

    configScopeChanged(scope) {
      this.set('currentConfig.scope', scope);
    },

    scopeComponentChanged(name) {
      this.set('currentConfig.componentName', name);
    }
  },

  // Whether the service definition currently has any components at all.
  isNonEmptyComponents: Ember.computed.gt('serviceDef.serviceComponents.length', 0),

  isNotQuicklink: Ember.computed('currentConfig.type', function() {
    return this.get('currentConfig.type') !== 'quicklink';
  }),

  // Names of all defined components, used to populate the scope selector.
  componentNames: Ember.computed('serviceDef.serviceComponents.[]', function() {
    return this.get('serviceDef.serviceComponents').mapBy('name');
  }),

  isValidCurrentConfig: Ember.computed.and('currentConfig', 'currentConfig.name', 'currentConfig.value')
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js
new file mode 100644
index 0000000..7c06152
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
/**
 * Table component that manages config files (HADOOP_XML / TEMPLATE) of a
 * service definition: add/remove modals, JSON upload, and a read-only
 * property viewer.
 */
export default Ember.Component.extend({
  serviceDef: null,
  currentFileConfig: null,
  fileConfigJson: '',
  fileConfigProps: '',
  propertyViewer: null,
  parseError: '',

  actions: {
    // Open the add-file-config modal with a blank file configuration.
    showNewConfigFileModal() {
      const fileConfig = this.get('serviceDef').createNewFileConfig();
      this.set('currentFileConfig', fileConfig);
      this.set('fileConfigProps', '');
      this.set('parseError', '');
      this.$('#addFileConfigModal').modal('show');
      // Default the component selector to the first component, if any exist.
      if (this.get('isNonEmptyComponents') && this.get('currentFileConfig.componentName') === '') {
        this.set('currentFileConfig.componentName', this.get('componentNames.firstObject'));
      }
    },

    removeFileConfiguration(file) {
      this.get('serviceDef.fileConfigs').removeObject(file);
    },

    // Parse inline properties (if provided) and append the file config.
    addNewFileConfig() {
      this.set('parseError', '');
      const rawProps = this.get('fileConfigProps');
      if (rawProps) {
        try {
          this.set('currentFileConfig.props', JSON.parse(rawProps));
        } catch (err) {
          // Surface the failure and rethrow so the config is never added
          // with unparseable properties and the modal stays open.
          this.set('parseError', `Invalid JSON: ${err.message}`);
          throw err;
        }
      }
      this.get('serviceDef.fileConfigs').addObject(this.get('currentFileConfig'));
      this.$('#addFileConfigModal').modal('hide');
    },

    showFileConfigUploadModal() {
      this.set('fileConfigJson', '');
      this.$('#service_file_config_upload_modal').modal('show');
    },

    uploadFileConfig(json) {
      this.get('serviceDef').convertJsonFileConfigs(json);
      this.$('#service_file_config_upload_modal').modal('hide');
    },

    configScopeChanged(scope) {
      this.set('currentFileConfig.scope', scope);
    },

    scopeComponentChanged(name) {
      this.set('currentFileConfig.componentName', name);
    },

    // Templates carry no inline props, so clear them on type change.
    configTypeChanged(type) {
      this.set('currentFileConfig.type', type);
      if (type === 'TEMPLATE') {
        this.set('currentFileConfig.props', null);
        this.set('fileConfigProps', '');
      }
    },

    showFileConfigPropertyViewer(props) {
      this.set('propertyViewer', props);
      this.$('#file_config_properties_viewer').modal('show');
    }
  },

  // Whether the service definition currently has any components at all.
  isNonEmptyComponents: Ember.computed.gt('serviceDef.serviceComponents.length', 0),

  // Names of all defined components, used to populate the scope selector.
  componentNames: Ember.computed('serviceDef.serviceComponents.[]', function() {
    return this.get('serviceDef.serviceComponents').mapBy('name');
  }),

  // A file config needs a destination plus either a source file or inline props.
  isValidCurrentFileConfig: Ember.computed('currentFileConfig', 'currentFileConfig.srcFile', 'currentFileConfig.destFile', 'fileConfigProps', function() {
    const hasSource = this.get('currentFileConfig.srcFile') || this.get('fileConfigProps');
    return this.get('currentFileConfig') && this.get('currentFileConfig.destFile') && hasSource;
  }),

  isConfigTypeHadoopXml: Ember.computed.equal('currentFileConfig.type', 'HADOOP_XML')
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js
new file mode 100644
index 0000000..2f9dc9c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
/**
 * Generic "upload configuration as JSON" modal component. Validates that
 * the pasted text is parseable JSON before forwarding it to the host via
 * the bound `uploadConfig` action.
 */
export default Ember.Component.extend({
  dialogId: "config_upload_modal",
  title: "Upload Configuration",
  configJson: '',
  parseErrorMsg: '',

  actions: {
    // Validate the pasted JSON and forward it upward when it parses cleanly.
    uploadConfig() {
      var json = this.get('configJson');
      try {
        JSON.parse(json);
        this.updateParseResults("");
      } catch (ex) {
        // Surface the failure to the user and stop here instead of
        // rethrowing — the error is already fully handled in the UI.
        this.updateParseResults("Invalid JSON: " + ex.message);
        return;
      }
      if (!this.get('parseErrorMsg')) {
        this.sendAction("uploadConfig", json);
      }
    }
  },

  // Clear any stale error message each time the modal is shown.
  didInsertElement() {
    this.$('#' + this.get('dialogId')).on('shown.bs.modal', function() {
      this.updateParseResults("");
    }.bind(this));
  },

  isValidConfigJson: Ember.computed.notEmpty('configJson'),

  // Record the current parse result ("" means no error).
  // Renamed from the misspelled `upateParseResults`.
  updateParseResults(message) {
    this.set('parseErrorMsg', message);
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
new file mode 100644
index 0000000..25d575f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
/**
 * Controller for the deploy-service page. Accepts a service definition
 * (either a built-up record or raw JSON), POSTs it through the
 * yarn-servicedef adapter and reports the outcome.
 */
export default Ember.Controller.extend({
  breadcrumbs: [{
    text: "Home",
    routeName: 'application'
  }, {
    text: "Services",
    routeName: 'yarn-services',
  }, {
    text: "New Service",
    routeName: 'yarn-deploy-service',
  }],

  savedStandardTemplates: [],
  savedJsonTemplates: [],
  serviceResponse: null,
  isLoading: false,

  actions: {
    // Deploy from the structured definition built on the page.
    deployServiceDef(serviceDef) {
      this.deployServiceApp(serviceDef.getServiceJSON());
    },

    // Deploy from raw, user-supplied JSON.
    deployServiceJson(json) {
      this.deployServiceApp(json);
    }
  },

  // After a successful deploy, clear the banner and move to the services list.
  gotoServices() {
    Ember.run.later(this, function() {
      this.set('serviceResponse', null);
      this.transitionToRoute('yarn-services');
    }, 1000);
  },

  // POST the service JSON via the adapter, tracking the loading state and
  // surfacing success or error through `serviceResponse`.
  deployServiceApp(requestJson) {
    var controller = this;
    var adapter = this.store.adapterFor('yarn-servicedef');
    this.set('isLoading', true);
    adapter.deployService(requestJson).then(function() {
      controller.set('serviceResponse', {message: 'Service has been accepted successfully. Redirecting to services in a second.', type: 'success'});
      controller.gotoServices();
    }, function(errmsg) {
      controller.set('serviceResponse', {message: errmsg, type: 'error'});
    }).finally(function() {
      controller.set('isLoading', false);
    });
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
new file mode 100644
index 0000000..0439fb4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
@@ -0,0 +1,278 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+import Ember from 'ember';
+
/**
 * Client-side model of a YARN service definition used by the deploy-service
 * page. Collects components, configurations and config files and serializes
 * them into the JSON payload expected by the YARN services REST API.
 */
export default DS.Model.extend({
  name: DS.attr('string', {defaultValue: ''}),
  queue: DS.attr('string', {defaultValue: ''}),
  lifetime: DS.attr('string', {defaultValue: ''}),
  isCached: DS.attr('boolean', {defaultValue: false}),

  serviceComponents: DS.attr({defaultValue: function() {
    return Ember.A();
  }}),

  serviceConfigs: DS.attr({defaultValue: function() {
    return Ember.A();
  }}),

  fileConfigs: DS.attr({defaultValue: function() {
    return Ember.A();
  }}),

  quicklinks: DS.attr({defaultValue: function() {
    return {};
  }}),

  // Reset the definition to a pristine state.
  clear() {
    this.set('name', '');
    this.set('queue', '');
    this.set('lifetime', '');
    this.get('serviceComponents').clear();
    this.get('serviceConfigs').clear();
    this.get('fileConfigs').clear();
    this.set('quicklinks', {});
  },

  // Deployable once it has a name, a queue and at least one component.
  isValidServiceDef() {
    return this.get('name') !== '' && this.get('queue') !== '' && this.get('serviceComponents.length') > 0;
  },

  // Blank component stub edited through the add-component modal.
  createNewServiceComponent() {
    return Ember.Object.create({
      name: '',
      numOfContainers: '',
      cpus: '',
      memory: '',
      artifactId: '',
      artifactType: 'DOCKER',
      launchCommand: '',
      dependencies: [],
      uniqueComponentSupport: false,
      configuration: null
    });
  },

  // Blank configuration entry (property / env / quicklink) with display helpers.
  createNewServiceConfig(name, value) {
    var Config = Ember.Object.extend({
      name: name || '',
      value: value || '',
      type: 'property', // property OR env OR quicklink
      scope: 'service', // service OR component
      componentName: '',
      capitalizedType: Ember.computed('type', function() {
        return Ember.String.capitalize(this.get('type'));
      }),
      formattedScope: Ember.computed('scope', 'componentName', function() {
        if (this.get('scope') !== 'service') {
          return this.get('componentName') + ' [Component]';
        }
        return Ember.String.capitalize(this.get('scope'));
      })
    });
    return Config.create();
  },

  // Blank file-config entry (HADOOP_XML / TEMPLATE) with display helpers.
  createNewFileConfig(src, dest) {
    var FileConfig = Ember.Object.extend({
      type: 'TEMPLATE', // HADOOP_XML OR TEMPLATE
      srcFile: src || '',
      destFile: dest || '',
      scope: 'service', // service OR component
      componentName: '',
      props: null,
      formattedScope: Ember.computed('scope', 'componentName', function() {
        if (this.get('scope') !== 'service') {
          return this.get('componentName') + ' [Component]';
        }
        return Ember.String.capitalize(this.get('scope'));
      })
    });
    return FileConfig.create();
  },

  getServiceJSON() {
    return this.serializeServiceDef();
  },

  // Build the full REST payload: top-level fields, components, and the
  // service/component-scoped configurations, files and quicklinks.
  serializeServiceDef() {
    var json = {
      name: "",
      queue: "",
      lifetime: "-1",
      components: [],
      configuration: {
        properties: {},
        env: {},
        files: []
      },
      quicklinks: {}
    };

    var components = this.get('serviceComponents');
    var configs = this.get('serviceConfigs');
    var fileConfigs = this.get('fileConfigs');

    json['name'] = this.get('name');
    json['queue'] = this.get('queue');

    // "-1" (unlimited) stays as the default unless a lifetime was entered.
    if (this.get('lifetime')) {
      json['lifetime'] = this.get('lifetime');
    }

    components.forEach(function(component) {
      json.components.push(this.serializeComponent(component));
    }.bind(this));

    // Route each config into the service-level or component-level section
    // according to its scope and type.
    configs.forEach(function(config) {
      let conf = this.serializeConfiguration(config);
      if (conf.scope === "service") {
        if (conf.type === "property") {
          json.configuration.properties[conf.name] = conf.value;
        } else if (conf.type === "env") {
          json.configuration.env[conf.name] = conf.value;
        } else if (conf.type === "quicklink") {
          json.quicklinks[conf.name] = conf.value;
        }
      } else if (conf.scope === "component") {
        let requiredCmp = json.components.findBy('name', conf.componentName);
        if (requiredCmp) {
          requiredCmp.configuration = requiredCmp.configuration || {};
          requiredCmp.configuration.properties = requiredCmp.configuration.properties || {};
          requiredCmp.configuration.env = requiredCmp.configuration.env || {};
          if (conf.type === "property") {
            requiredCmp.configuration.properties[conf.name] = conf.value;
          } else if (conf.type === "env") {
            requiredCmp.configuration.env[conf.name] = conf.value;
          }
        }
      }
    }.bind(this));

    fileConfigs.forEach(function(file) {
      let scope = file.get('scope');
      if (scope === "service") {
        json.configuration.files.push(this.serializeFileConfig(file));
      } else if (scope === "component") {
        let requiredCmp = json.components.findBy('name', file.get('componentName'));
        if (requiredCmp) {
          requiredCmp.configuration = requiredCmp.configuration || {};
          requiredCmp.configuration.files = requiredCmp.configuration.files || [];
          requiredCmp.configuration.files.push(this.serializeFileConfig(file));
        }
      }
    }.bind(this));

    return json;
  },

  // Map a component record onto the REST API's component shape.
  serializeComponent(record) {
    var json = {};
    json['name'] = record.get('name');
    json['number_of_containers'] = record.get('numOfContainers');
    json['launch_command'] = record.get('launchCommand');
    json['dependencies'] = [];
    json['artifact'] = {
      id: record.get('artifactId'),
      type: record.get('artifactType')
    };
    json['resource'] = {
      cpus: record.get('cpus'),
      memory: record.get('memory')
    };
    if (record.get('uniqueComponentSupport')) {
      json['unique_component_support'] = "true";
    }
    if (record.get('configuration')) {
      json['configuration'] = record.get('configuration');
    }
    return json;
  },

  serializeConfiguration(config) {
    var json = {};
    json["type"] = config.get('type');
    json["scope"] = config.get('scope');
    json["componentName"] = config.get('componentName');
    json["name"] = config.get('name');
    json["value"] = config.get('value');
    return json;
  },

  // Inline props are only meaningful for HADOOP_XML files.
  serializeFileConfig(file) {
    var json = {};
    json["type"] = file.get('type');
    json["dest_file"] = file.get('destFile');
    json["src_file"] = file.get('srcFile');
    if (file.get('type') === "HADOOP_XML" && file.get('props')) {
      json["props"] = file.get('props');
    }
    return json;
  },

  createNewServiceDef() {
    return this.get('store').createRecord('yarn-servicedef', {
      id: 'yarn_servicedef_' + Date.now()
    });
  },

  // Import properties and env vars from an uploaded configuration JSON blob.
  convertJsonServiceConfigs(json) {
    var parsedJson = JSON.parse(json);
    if (parsedJson.properties) {
      for (let prop in parsedJson.properties) {
        if (parsedJson.properties.hasOwnProperty(prop)) {
          let newPropObj = this.createNewServiceConfig(prop, parsedJson.properties[prop]);
          this.get('serviceConfigs').addObject(newPropObj);
        }
      }
    }
    if (parsedJson.env) {
      for (let envprop in parsedJson.env) {
        if (parsedJson.env.hasOwnProperty(envprop)) {
          let newEnvObj = this.createNewServiceConfig(envprop, parsedJson.env[envprop]);
          newEnvObj.set('type', 'env');
          this.get('serviceConfigs').addObject(newEnvObj);
        }
      }
    }
  },

  // Import file configs from an uploaded configuration JSON blob.
  convertJsonFileConfigs(json) {
    var parsedJson = JSON.parse(json);
    if (parsedJson.files) {
      parsedJson.files.forEach(function(file) {
        let newFileObj = this.createNewFileConfig(file.src_file, file.dest_file);
        this.get('fileConfigs').addObject(newFileObj);
      }.bind(this));
    }
  },

  // Deep-ish copy of this definition into a fresh record.
  cloneServiceDef() {
    var clone = this.createNewServiceDef();
    clone.set('name', this.get('name'));
    clone.set('queue', this.get('queue'));
    clone.set('lifetime', this.get('lifetime'));
    // BUGFIX: these three previously called clone.get(key, value); get()
    // ignores its second argument, so the clone silently lost all
    // components, configs and file configs. They must be set().
    clone.set('serviceComponents', this.get('serviceComponents'));
    clone.set('serviceConfigs', this.get('serviceConfigs'));
    clone.set('fileConfigs', this.get('fileConfigs'));
    clone.set('quicklinks', this.get('quicklinks'));
    return clone;
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
index 9013142..9b3424b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js
@@ -48,6 +48,7 @@ Router.map(function() {
   this.route('yarn-container-log', { path:
       '/yarn-container-log/:node_id/:node_addr/:container_id/:filename' });
 
+  this.route('yarn-deploy-service');
   this.route('cluster-overview');
   this.route('yarn-app', function() {
     this.route('info', {path: '/:app_id/info'});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js
new file mode 100644
index 0000000..05ef600
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
/**
 * Route backing the deploy-service page. Its model is a fresh, unsaved
 * service-definition record that the page's form components populate.
 */
export default Ember.Route.extend({
  model() {
    // Timestamp keeps client-side record ids unique across visits.
    var recordId = 'yarn_servicedef_' + Date.now();
    return this.store.createRecord('yarn-servicedef', {id: recordId});
  }
});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
index 807844e..9359530 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js
@@ -75,4 +75,8 @@ export default Ember.Service.extend({
   rmWebAddress: Ember.computed(function () {
     return this.normalizeURL(this.get("env.app.hosts.rmWebAddress"));
   }),
+
+  dashWebAddress: Ember.computed(function () {
+    return this.normalizeURL(this.get("env.app.hosts.dashWebAddress"));
+  })
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
index 8b8ea56..d246f2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css
@@ -397,3 +397,167 @@ div.attempt-info-panel table > tbody > tr > td:last-of-type {
   width: 14px;
   display: inline-block;
 }
+
/* ==== Deploy-service page: layout, modals, tables and tooltips ==== */

.deploy-service textarea {
  border-radius: 5px !important;
  resize: none;
  word-wrap: break-word;
}

/* Dim the page and overlay a centered spinner while a deploy is in flight. */
.deploy-service .loading-state {
  opacity: 0.5;
}

.deploy-service .loading-state img {
  width: 80px;
  height: 80px;
  margin: 40px auto;
  left: 50% !important;
  position: absolute;
  z-index: 9999;
}

.align-center {
  text-align: center !important;
}

.bold-text {
  font-weight: bold !important;
}

.deploy-service .saved-list {
  min-height: 600px;
}

.deploy-service .glyphicon {
  cursor: pointer;
}

.deploy-service .remove-icon:hover {
  color: #d9534f;
}

/* Two-column layout: saved templates (left) and the definition form (right). */
.deploy-service .savedlist-column {
  padding-top: 10px;
}

.deploy-service .definition-column {
  padding-top: 10px;
  border-left: 1px solid #ddd;
}

.deploy-service .content-area {
  padding: 15px 0px;
  border-top: 1px solid #ddd;
}

.deploy-service .custom-json-area {
  padding: 10px 0;
  margin-top: -26px;
}

.deploy-service-modal .modal-dialog {
  width: 400px;
}

.deploy-service-modal .form-group {
  margin-bottom: 5px;
}

.deploy-service .action-btns {
  text-align: right;
  padding-bottom: 15px;
  padding-right: 0;
}

/* Narrow trailing column reserved for per-row action icons. */
table.table-custom-action > thead > tr > th:last-of-type, table.table-custom-action > tbody > tr > td:last-of-type {
  width: 50px !important;
}

.deploy-service .toggle-btn.active {
  color: #fff;
  background-color: #337ab7;
  border-color: #337ab7;
  text-shadow: none;
}

.deploy-service .service-resp {
  word-wrap: break-word;
}

table.table-custom-bordered {
  border: 1px solid #ddd !important;
  border-radius: 3px !important;
}

table.table-custom-bordered > thead > tr > th, table.table-custom-bordered > tbody > tr > td {
  border-bottom: 1px solid #ddd !important;
  border-right: 1px solid #ddd !important;
}

table.table-custom-striped > thead > tr, .table-custom-striped > tbody > tr:nth-of-type(even) {
  background-color: #f9f9f9 !important;
}

/* Mark mandatory form fields with a red asterisk. */
.deploy-service label.required:after, .deploy-service-modal label.required:after {
  content: '*';
  color: #d9534f;
}

.deploy-service .form-group.shrink-height {
  margin-bottom: -8px;
}

table.fix-table-overflow {
  table-layout: fixed;
}

table.fix-table-overflow > tbody > tr > td:last-of-type {
  overflow: hidden;
  text-overflow: ellipsis;
}

/* Floating info tooltip shown next to help icons; hidden by default. */
div.tooltip.info-tooltip {
  font: 14px sans-serif !important;
  background: lightsteelblue;
  word-wrap: break-word;
  position: absolute;
  text-align: justify;
  border-radius: 3px;
  z-index: 9999;
  padding: 10px;
  display: none;
  min-width: 200px;
  max-width: 500px;
  opacity: 1;
  top: 10px;
  left: 0;
}

div.tooltip.info-tooltip > span.top-arrow {
  color: lightsteelblue;
  position: absolute;
  top: -10px;
  left: 10px;
}

span.info-icon {
  color: #337ab7 !important;
}

/* Translucent overlay with spinner shown while a service action runs. */
div.service-action-mask {
  position: absolute;
  opacity: 0.5;
  z-index: 9999;
  width: 100%;
  height: 100%;
}

div.service-action-mask img {
  position: absolute;
  width: 80px;
  height: 80px;
  margin: 40px auto;
  left: 45% !important;
  z-index: 9999;
}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index e988e0c..a08ff72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -92,3 +92,5 @@
     </div>
   </div>
 </div>
+
+{{info-tooltip}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs
index 24acbd9..54229cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs
@@ -18,5 +18,7 @@
 
 <div class="col-md-12 container-fluid breadcrumb-bar">
   {{em-breadcrumbs items=breadcrumbs}}
-  <button type="button" class="btn btn-sm btn-primary refresh" {{action "refresh"}}>Refresh</button>
+  {{#unless hideRefresh}}
+    <button type="button" class="btn btn-sm btn-primary refresh" {{action "refresh"}}>Refresh</button>
+  {{/unless}}
 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
new file mode 100644
index 0000000..a098ec3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
@@ -0,0 +1,157 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="container-fluid deploy-service">
+  {{#if serviceResp}}
+    <div class="row">
+      <div class="col-md-12">
+        <div class="panel panel-default service-resp">
+          <div class="panel-body {{if (eq serviceResp.type 'error') 'bg-danger' 'bg-success'}}">
+            <span class="glyphicon glyphicon-remove pull-right remove-icon" {{action "clearServiceResponse"}}></span>
+            <strong class="{{if (eq serviceResp.type 'error') 'text-danger' 'text-success'}}">{{serviceResp.message}}</strong>
+          </div>
+        </div>
+      </div>
+    </div>
+  {{/if}}
+  <div class="panel panel-default {{if isLoading 'loading-state'}}">
+    {{#if isLoading}}
+      <img src="assets/images/spinner.gif" alt="Loading...">
+    {{/if}}
+    <div class="row">
+      <div class="col-md-12">
+        <div class="col-md-2 savedlist-column">
+          <label>Saved Templates</label>
+          <div class="panel panel-default saved-list">
+            <ul class="list-group">
+              {{#each getSavedList as |list|}}
+                <a href="#" class="list-group-item {{if list.active 'active'}}" {{action "updateServiceDef" list}}>
+                  {{list.name}}
+                  <span class="glyphicon glyphicon-remove pull-right remove-icon" {{action "removeFromSavedList" list}}></span>
+                </a>
+              {{else}}
+                <li class="list-group-item align-center">No saved templates</li>
+              {{/each}}
+            </ul>
+          </div>
+        </div>
+
+        <div class="col-md-10 definition-column">
+          <label>Service Definition</label>
+          <div class="btn-group pull-right" data-toggle="buttons">
+            <label class="btn btn-default btn-sm toggle-btn active" {{action "updateViewType" "standard"}}>
+              <input type="radio" name="custom" checked><b>Standard</b>
+            </label>
+            <label class="btn btn-default btn-sm toggle-btn" {{action "updateViewType" "custom"}}>
+              <input type="radio" name="custom"><b>Custom</b>
+            </label>
+          </div>
+
+          <div class="col-md-12 content-area">
+            {{#if isStandardViewType}}
+
+              <div class="row">
+                <div class="col-md-4">
+                  <div class="form-group shrink-height">
+                    <label class="required">Service Name</label>
+                    <span class="glyphicon glyphicon-info-sign info-icon" data-info="serviceName"></span>
+                    {{input type="text" class="form-control" placeholder="Service Name" value=serviceDef.name}}
+                  </div>
+                  <br>
+                </div>
+              </div>
+
+              <div class="row">
+                <div class="col-md-4">
+                  <div class="form-group shrink-height">
+                    <label class="required">Queue Name</label>
+                    <span class="glyphicon glyphicon-info-sign info-icon" data-info="queueName"></span>
+                    {{input type="text" class="form-control" placeholder="Queue Name" value=serviceDef.queue}}
+                  </div>
+                  <br>
+                </div>
+              </div>
+
+              <div class="row">
+                <div class="col-md-4">
+                  <div class="form-group">
+                    <label>Service Lifetime</label>
+                    <span class="glyphicon glyphicon-info-sign info-icon" data-info="lifetime"></span>
+                    {{input type="number" min="0" class="form-control" placeholder="Service Lifetime (Seconds)" value=serviceDef.lifetime}}
+                  </div>
+                  <br>
+                </div>
+              </div>
+
+              <div class="row">
+                {{service-component-table serviceDef=serviceDef applicationCtrl=applicationCtrl}}
+              </div>
+
+              <div class="row">
+                {{service-config-table serviceDef=serviceDef}}
+              </div>
+
+              <div class="row">
+                {{service-fileconfig-table serviceDef=serviceDef}}
+              </div>
+            {{/if}}
+
+            {{#if isCustomViewType}}
+            <div class="form-group custom-json-area">
+              {{textarea class="form-control" rows="29" cols="120" value=customServiceDef placeholder="Service JSON configuration here..."}}
+            </div>
+            {{/if}}
+          </div>
+
+          <div class="col-md-12 action-btns">
+            <button class="btn btn-default btn-sm" {{action "clearConfigs"}} disabled={{if isLoading "disabled"}}>
+              Reset
+            </button>
+            <button class="btn btn-primary btn-sm" disabled={{unless enableSaveOrDeployBtn "disabled"}} {{action "showSaveTemplateModal"}}>
+              Save
+            </button>
+            <button class="btn btn-success btn-sm" disabled={{unless enableSaveOrDeployBtn "disabled"}} {{action "deployService"}}>
+              Deploy
+            </button>
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
+
+<div class="modal fade deploy-service-modal" tabindex="-1" role="dialog" id="saveListModal">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title">Save Template As</h4>
+      </div>
+      <div class="modal-body">
+        <div class="form-group">
+          <label>Template Name</label>
+          {{input type="text" class="form-control" id="templateNameInput" value=savedTemplateName}}
+        </div>
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+        <button type="button" class="btn btn-primary" {{action "addToSavedList"}} disabled={{unless isValidTemplateName "disabled"}}>Add</button>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs
new file mode 100644
index 0000000..1420340
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs
@@ -0,0 +1,53 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="modal fade" tabindex="-1" role="dialog" id="{{dialogId}}">
+  <div class="modal-dialog" role="document" style="width: 700px;">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title bold-text">{{title}}</h4>
+      </div>
+      <div class="modal-body" style="padding: 0;">
+        <table class="table table-hover table-custom-bordered table-custom-striped fix-table-overflow" style="max-width: 700px;">
+          <thead>
+            <tr>
+              <th>Name</th>
+              <th>Value</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{#each customProps as |prop|}}
+              <tr>
+                <td>{{prop.name}}</td>
+                <td title="{{prop.value}}">{{prop.value}}</td>
+              </tr>
+            {{else}}
+              <tr>
+                <td colspan="2">No data available</td>
+              </tr>
+            {{/each}}
+          </tbody>
+        </table>
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs
new file mode 100644
index 0000000..faba135
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs
@@ -0,0 +1,20 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<span class="glyphicon glyphicon-triangle-top top-arrow"></span>
+<div id="tooltip_content"></div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
new file mode 100644
index 0000000..8f3904d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
@@ -0,0 +1,113 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12">
+  <div class="form-group">
+    <label>Service Components</label>
+    <span class="glyphicon glyphicon-info-sign info-icon" data-info="components"></span>
+    <button class="btn btn-primary btn-xs pull-right" {{action "showAddComponentModal"}}>
+      <span class="glyphicon glyphicon-plus"></span>
+    </button>
+    <div class="panel panel-default">
+      <table class="table table-hover table-custom-bordered table-custom-striped table-custom-action">
+        <thead>
+          <tr>
+            <th>Component Name</th>
+            <th>CPU</th>
+            <th>Memory</th>
+            <th># Containers</th>
+            <th>Artifact Id</th>
+            <th>Launch Command</th>
+            <th></th>
+          </tr>
+        </thead>
+        <tbody>
+          {{#each serviceDef.serviceComponents as |component|}}
+            <tr>
+              <td>{{component.name}}</td>
+              <td>{{component.cpus}}</td>
+              <td>{{component.memory}}</td>
+              <td>{{component.numOfContainers}}</td>
+              <td>{{component.artifactId}}</td>
+              <td>{{component.launchCommand}}</td>
+              <td class="align-center">
+                <span class="glyphicon glyphicon-remove remove-icon" {{action "removeComponent" component}}></span>
+              </td>
+            </tr>
+          {{else}}
+            <tr class="align-center">
+              <td colspan="7">No data available</td>
+            </tr>
+          {{/each}}
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>
+
+<div class="modal fade deploy-service-modal" tabindex="-1" role="dialog" id="addComponentModal">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title">Add Component</h4>
+      </div>
+      <div class="modal-body">
+        {{#if duplicateNameError}}
+          <div class="alert alert-danger alert-dismissible" role="alert">
+            <strong>Component name already exists</strong>
+          </div>
+        {{/if}}
+        <div class="form-group">
+          <label class="required">Component Name</label>
+          {{input type="text" class="form-control" value=currentComponent.name}}
+        </div>
+        <div class="form-group">
+          <label class="required">CPU</label>
+          {{input type="number" min="0" class="form-control" value=currentComponent.cpus}}
+        </div>
+        <div class="form-group">
+          <label class="required">Memory</label>
+          {{input type="number" min="0" class="form-control" value=currentComponent.memory}}
+        </div>
+        <div class="form-group">
+          <label class="required"># Containers</label>
+          {{input type="number" min="0" class="form-control" value=currentComponent.numOfContainers}}
+        </div>
+        <div class="form-group">
+          <label class="required">Artifact Id</label>
+          {{input type="text" class="form-control" value=currentComponent.artifactId}}
+        </div>
+        <div class="form-group">
+          <label class="required">Launch Command</label>
+          {{input type="text" class="form-control" value=currentComponent.launchCommand}}
+        </div>
+        <div class="form-group">
+          <label class="checkbox-inline">
+            {{input type="checkbox" checked=currentComponent.uniqueComponentSupport}}
+            Unique Component Support
+          </label>
+        </div>
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+        <button type="button" class="btn btn-primary" {{action "addNewComponent"}} disabled={{unless isValidCurrentComponent "disabled"}}>Add</button>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs
new file mode 100644
index 0000000..46a66ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs
@@ -0,0 +1,130 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12">
+  <div class="form-group">
+    <label>Service Configurations</label>
+    <span class="glyphicon glyphicon-info-sign info-icon" data-info="configurations"></span>
+    <button class="btn btn-primary btn-xs pull-right" {{action "showNewConfigurationModal"}}>
+      <span class="glyphicon glyphicon-plus"></span>
+    </button>
+    <button class="btn btn-primary btn-xs pull-right" style="margin-right: 5px;" {{action "showServiceConfigUploadModal"}}>
+      <span class="glyphicon glyphicon-open"></span>
+    </button>
+    <div class="panel panel-default">
+      <table class="table table-hover table-custom-bordered table-custom-striped table-custom-action">
+        <thead>
+          <tr>
+            <th>Name</th>
+            <th>Value</th>
+            <th>Type</th>
+            <th>Scope</th>
+            <th></th>
+          </tr>
+        </thead>
+        <tbody>
+          {{#each serviceDef.serviceConfigs as |config|}}
+            <tr>
+              <td>{{config.name}}</td>
+              <td>{{config.value}}</td>
+              <td>{{config.capitalizedType}}</td>
+              <td>{{config.formattedScope}}</td>
+              <td>
+                <span class="glyphicon glyphicon-remove remove-icon" {{action "removeConfiguration" config}}></span>
+              </td>
+            </tr>
+          {{else}}
+            <tr class="align-center">
+              <td colspan="5">No data available</td>
+            </tr>
+          {{/each}}
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>
+
+<div class="modal fade deploy-service-modal" tabindex="-1" role="dialog" id="addConfigurationModal">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title bold-text">Add Configuration</h4>
+      </div>
+      <div class="modal-body">
+        <div class="form-group">
+          <label class="required">Name</label>
+          {{input type="text" class="form-control" value=currentConfig.name}}
+        </div>
+        <div class="form-group">
+          <label class="required">Value</label>
+          {{input type="text" class="form-control" value=currentConfig.value}}
+        </div>
+        <div class="form-group">
+          <label class="required">Type</label>
+          <div>
+            <label class="radio-inline">
+              <input type="radio" name="type" value="property" checked={{eq currentConfig.type "property"}} onchange={{action "configTypeChanged" "property"}}>Property
+            </label>
+            <label class="radio-inline">
+              <input type="radio" name="type" value="env" checked={{eq currentConfig.type "env"}} onchange={{action "configTypeChanged" "env"}}>Env
+            </label>
+            <label class="radio-inline">
+              <input type="radio" name="type" value="quicklink" checked={{eq currentConfig.type "quicklink"}} onchange={{action "configTypeChanged" "quicklink"}}>Quicklink
+            </label>
+          </div>
+        </div>
+        {{#if isNotQuicklink}}
+          <div class="form-group">
+            <label class="required">Scope</label>
+            <div>
+              <label class="radio-inline">
+                <input type="radio" name="scope" value="service" checked={{eq currentConfig.scope "service"}} onchange={{action "configScopeChanged" "service"}}>Service
+              </label>
+              {{#if isNonEmptyComponents}}
+                <label class="radio-inline">
+                  <input type="radio" name="scope" value="component" checked={{eq currentConfig.scope "component"}} onchange={{action "configScopeChanged" "component"}}>Component
+                </label>
+              {{/if}}
+            </div>
+          </div>
+          {{#if (eq currentConfig.scope "component")}}
+            <div class="form-group">
+              <select class="form-control" onchange={{action "scopeComponentChanged" value="target.value"}}>
+                {{#each componentNames as |name|}}
+                  <option value="{{name}}" selected={{eq currentConfig.componentName name}}>{{name}}</option>
+                {{/each}}
+              </select>
+            </div>
+          {{/if}}
+        {{/if}}
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+        <button type="button" class="btn btn-primary" {{action "addNewConfiguration"}} disabled={{unless isValidCurrentConfig "disabled"}}>Add</button>
+      </div>
+    </div>
+  </div>
+</div>
+
+{{upload-config
+  dialogId="service_config_upload_modal"
+  title="Upload Service Configurations"
+  configJson=serviceConfigJson
+  uploadConfig="uploadServiceConfig"
+}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs
new file mode 100644
index 0000000..97442c6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs
@@ -0,0 +1,152 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12">
+  <div class="form-group">
+    <label>File Configurations</label>
+    <span class="glyphicon glyphicon-info-sign info-icon" data-info="fileConfigs"></span>
+    <button class="btn btn-primary btn-xs pull-right" {{action "showNewConfigFileModal"}}>
+      <span class="glyphicon glyphicon-plus"></span>
+    </button>
+    <button class="btn btn-primary btn-xs pull-right" style="margin-right: 5px;" {{action "showFileConfigUploadModal"}}>
+      <span class="glyphicon glyphicon-open"></span>
+    </button>
+    <div class="panel panel-default">
+      <table class="table table-hover table-custom-bordered table-custom-striped table-custom-action">
+        <thead>
+          <tr>
+            <th>Source File</th>
+            <th>Properties</th>
+            <th>Destination File</th>
+            <th>Type</th>
+            <th>Scope</th>
+            <th></th>
+          </tr>
+        </thead>
+        <tbody>
+          {{#each serviceDef.fileConfigs as |file|}}
+            <tr>
+              <td>
+                {{#if file.srcFile}}
+                  {{file.srcFile}}
+                {{else}}
+                  <span>N/A</span>
+                {{/if}}
+              </td>
+              <td>
+                {{#if file.props}}
+                  <a href="#" {{action "showFileConfigPropertyViewer" file.props}}>View Properties</a>
+                {{else}}
+                  <span>N/A</span>
+                {{/if}}
+              </td>
+              <td>{{file.destFile}}</td>
+              <td>{{file.type}}</td>
+              <td>{{file.formattedScope}}</td>
+              <td class="align-center">
+                <span class="glyphicon glyphicon-remove remove-icon" {{action "removeFileConfiguration" file}}></span>
+              </td>
+            </tr>
+          {{else}}
+            <tr class="align-center">
+              <td colspan="6">No data available</td>
+            </tr>
+          {{/each}}
+        </tbody>
+      </table>
+    </div>
+  </div>
+</div>
+
+<div class="modal fade deploy-service-modal" tabindex="-1" role="dialog" id="addFileConfigModal">
+  <div class="modal-dialog" role="document" style="width: 500px;">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title bold-text">Add File Configuration</h4>
+      </div>
+      <div class="modal-body">
+        {{#if parseError}}
+          <div class="alert alert-danger alert-dismissible" role="alert">
+            <strong>{{parseError}}</strong>
+          </div>
+        {{/if}}
+        <div class="form-group">
+          <label class="required">Type</label>
+          <div>
+            <label class="radio-inline">
+              <input type="radio" name="type" value="TEMPLATE" checked={{eq currentFileConfig.type "TEMPLATE"}} onchange={{action "configTypeChanged" "TEMPLATE"}}>TEMPLATE
+            </label>
+            <label class="radio-inline">
+              <input type="radio" name="type" value="HADOOP_XML" checked={{eq currentFileConfig.type "HADOOP_XML"}} onchange={{action "configTypeChanged" "HADOOP_XML"}}>HADOOP_XML
+            </label>
+          </div>
+        </div>
+        <div class="form-group">
+          <label class={{unless isConfigTypeHadoopXml "required"}}>Source File</label>
+          {{input type="text" class="form-control" value=currentFileConfig.srcFile}}
+        </div>
+        {{#if isConfigTypeHadoopXml}}
+          <div class="form-group">
+            <label>Properties</label> <span>(Source File and/or Properties are required)</span>
+            {{textarea class="form-control" rows="15" value=fileConfigProps placeholder="Configuration file properties here..."}}
+          </div>
+        {{/if}}
+        <div class="form-group">
+          <label class="required">Destination File</label>
+          {{input type="text" class="form-control" value=currentFileConfig.destFile}}
+        </div>
+        <div class="form-group">
+          <label class="required">Scope</label>
+          <div>
+            <label class="radio-inline">
+              <input type="radio" name="scope" value="service" checked={{eq currentFileConfig.scope "service"}} onchange={{action "configScopeChanged" "service"}}>Service
+            </label>
+            {{#if isNonEmptyComponents}}
+              <label class="radio-inline">
+                <input type="radio" name="scope" value="component" checked={{eq currentFileConfig.scope "component"}} onchange={{action "configScopeChanged" "component"}}>Component
+              </label>
+            {{/if}}
+          </div>
+        </div>
+        {{#if (eq currentFileConfig.scope "component")}}
+          <div class="form-group">
+            <select class="form-control" onchange={{action "scopeComponentChanged" value="target.value"}}>
+              {{#each componentNames as |name|}}
+                <option value="{{name}}" selected={{eq currentFileConfig.componentName name}}>{{name}}</option>
+              {{/each}}
+            </select>
+          </div>
+        {{/if}}
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+        <button type="button" class="btn btn-primary" {{action "addNewFileConfig"}} disabled={{unless isValidCurrentFileConfig "disabled"}}>Add</button>
+      </div>
+    </div>
+  </div>
+</div>
+
+{{upload-config
+  dialogId="service_file_config_upload_modal"
+  title="Upload File Configurations"
+  configJson=fileConfigJson
+  uploadConfig="uploadFileConfig"
+}}
+
+{{fileconfig-viewer-dialog dialogId="file_config_properties_viewer" props=propertyViewer}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs
new file mode 100644
index 0000000..045fb0f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs
@@ -0,0 +1,44 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="modal fade" tabindex="-1" role="dialog" id="{{dialogId}}">
+  <div class="modal-dialog" role="document">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
+        <h4 class="modal-title bold-text">{{title}}</h4>
+      </div>
+      <div class="modal-body">
+        <div class="form-group">
+          {{textarea class="form-control" rows="20" cols="100" value=configJson placeholder="JSON Configuration Here..."}}
+        </div>
+        {{#if isParseError}}
+          <div class="panel panel-default">
+            <div class="panel-body bg-danger">
+              <strong>{{parseErrorMsg}}</strong>
+            </div>
+          </div>
+        {{/if}}
+      </div>
+      <div class="modal-footer">
+        <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+        <button type="button" class="btn btn-primary" {{action "uploadConfig"}} disabled={{unless isValidConfigJson "disabled"}}>Upload</button>
+      </div>
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81cb5326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs
new file mode 100644
index 0000000..98bc917
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs
@@ -0,0 +1,33 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{breadcrumb-bar breadcrumbs=breadcrumbs hideRefresh=true}}
+
+<div class="col-md-12 container-fluid">
+  <div class="row">
+    {{deploy-service
+      savedStandardTemplates=savedStandardTemplates
+      savedJsonTemplates=savedJsonTemplates
+      serviceDef=model
+      serviceResp=serviceResponse
+      isLoading=isLoading
+      deployServiceDef="deployServiceDef"
+      deployServiceJson="deployServiceJson"
+    }}
+  </div>
+</div>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/50] [abbrv] hadoop git commit: YARN-6398. Support to add native-service specific details in new YARN UI. Contributed by Akhil PB.

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
index 3cfec33..534869e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
@@ -22,11 +22,34 @@
   </div>
 </div>
 
+{{#if actionResponse}}
+  <div class="row">
+    <div class="col-md-12">
+      <div class="alert alert-dismissible {{if (eq actionResponse.type 'error') 'alert-danger' 'alert-success'}}" role="alert">
+        <button class="close" data-dismiss="alert" aria-label="Close" {{action "resetActionResponse"}}><span aria-hidden="true">&times;</span></button>
+        <strong>{{actionResponse.msg}}</strong>
+      </div>
+    </div>
+  </div>
+{{/if}}
+
+{{#if isLoading}}
+  <div class="panel panel-default service-action-mask">
+    <img src="assets/images/spinner.gif" alt="Loading...">
+  </div>
+{{/if}}
+
 <div class="row">
   <div class="col-md-12 container-fluid">
     <div class="panel panel-default">
       <div class="panel-heading">
         Basic Info
+        {{#if isRunningService}}
+          <div class="pull-right" style="display: inline-block; margin: -4px -10px 0 0;">
+            <button class="btn btn-sm btn-danger" disabled="{{if isLoading 'disabled'}}" {{action "showStopServiceConfirm"}}> Stop </button>
+            <button class="btn btn-sm btn-danger" disabled="{{if isLoading 'disabled'}}" {{action "showDeleteServiceConfirm"}}> Delete </button>
+          </div>
+        {{/if}}
       </div>
       <div class="x-scroll">
         <table class="display table table-striped table-bordered"
@@ -116,24 +139,25 @@
       <table class="display table table-striped table-bordered"
              cellspacing="0" width="100%">
         <thead>
-          <tr>
-            <th>Allocated Resource</th>
-            <th>Running Containers</th>
-            <th>Preempted Resource</th>
-            <th>Num Non-AM container preempted</th>
-            <th>Num AM container preempted</th>
-            <th>Aggregated Resource Usage</th>
-          </tr>
+        <tr>
+          <th>Allocated Resource</th>
+          <th>Running Containers</th>
+          <th>Preempted Resource</th>
+          <th>Num Non-AM container preempted</th>
+          <th>Num AM container preempted</th>
+          <th>Aggregated Resource Usage</th>
+        </tr>
         </thead>
+
         <tbody>
-          <tr>
-            <td>{{model.app.allocatedResource}}</td>
-            <td>{{model.app.runningContainersNumber}}</td>
-            <td>{{model.app.preemptedResource}}</td>
-            <td>{{model.app.numAMContainerPreempted}}</td>
-            <td>{{model.app.numAMContainerPreempted}}</td>
-            <td>{{model.app.aggregatedResourceUsage}}</td>
-          </tr>
+        <tr>
+          <td>{{model.app.allocatedResource}}</td>
+          <td>{{model.app.runningContainersNumber}}</td>
+          <td>{{model.app.preemptedResource}}</td>
+          <td>{{model.app.numAMContainerPreempted}}</td>
+          <td>{{model.app.numAMContainerPreempted}}</td>
+          <td>{{model.app.aggregatedResourceUsage}}</td>
+        </tr>
         </tbody>
       </table>
     </div>
@@ -148,20 +172,55 @@
       <table class="display table table-striped table-bordered"
              cellspacing="0" width="100%">
         <thead>
-          <tr>
-            <th>Master Container Log</th>
-            <th>Master Node</th>
-            <th>Master Node Label Expression</th>
-          </tr>
+        <tr>
+          <th>Master Container Log</th>
+          <th>Master Node</th>
+          <th>Master Node Label Expression</th>
+        </tr>
         </thead>
+
         <tbody>
-          <tr>
-            <td><a href="{{model.app.amContainerLogs}}" target="_blank">Link</a></td>
-            <td><a href="{{amHostHttpAddressFormatted}}" target="_blank">Link</a></td>
-            <td>{{model.app.amNodeLabelExpression}}</td>
-          </tr>
+        <tr>
+          <td><a href="{{model.app.amContainerLogs}}" target="_blank">Link</a></td>
+          <td><a href="{{amHostHttpAddressFormatted}}" target="_blank">Link</a></td>
+          <td>{{model.app.amNodeLabelExpression}}</td>
+        </tr>
         </tbody>
       </table>
     </div>
   </div>
+
+  {{#if model.serviceName}}
+    <div class="col-md-6 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">Quick Links</div>
+        <table class="display table table-striped table-bordered">
+          <tbody>
+            {{#each model.quicklinks as |link|}}
+              <tr>
+                <td>{{link.name}}</td>
+                <td><a href="{{link.value}}" target="_blank">{{link.value}}</a></td>
+              </tr>
+            {{else}}
+              <tr class="align-center">
+                <td colspan="2">No quicklinks available!</td>
+              </tr>
+            {{/each}}
+          </tbody>
+        </table>
+      </div>
+    </div>
+  {{/if}}
 </div>
+
+{{confirm-dialog
+  dialogId="stopServiceConfirmDialog"
+  message=(concat 'Are you sure you want to stop service "' model.serviceName '" ?')
+  action="stopService"
+}}
+
+{{confirm-dialog
+  dialogId="deleteServiceConfirmDialog"
+  message=(concat 'Are you sure you want to delete service "' model.serviceName '" ?')
+  action="deleteService"
+}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs
new file mode 100644
index 0000000..36336ad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs
@@ -0,0 +1,43 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{breadcrumb-bar breadcrumbs=breadcrumbs}}
+
+<div class="col-md-12 container-fluid">
+  <div class="row">
+
+    <div class="col-md-2 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          Component
+        </div>
+        <div class="panel-body">
+          <ul class="nav nav-pills nav-stacked collapse in">
+            {{#link-to 'yarn-component-instance.info' tagName="li" class=(if (eq target.currentPath 'yarn-component-instance.info') "active")}}
+              {{#link-to 'yarn-component-instance.info' componentName instanceName (query-params service=serviceName appid=appId)}}Information{{/link-to}}
+            {{/link-to}}
+          </ul>
+        </div>
+      </div>
+    </div>
+
+    <div class="col-md-10 container-fluid">
+      {{outlet}}
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
new file mode 100644
index 0000000..3d5720e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
@@ -0,0 +1,81 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  {{#if model.container}}
+  <div class="panel panel-default">
+    <div class="panel-heading">
+      <div class="panel-title">Component Information</div>
+    </div>
+    <div class="">
+      <table class="table table-striped table-bordered table-hover">
+        <tbody>
+          <tr>
+            <td>Component Name</td>
+            <td>{{check-availability model.container.instanceName}}</td>
+          </tr>
+          <tr>
+            <td>Component Group</td>
+            <td>{{check-availability model.container.component}}</td>
+          </tr>
+          <tr>
+            <td>Current Container Id</td>
+            <td>{{check-availability model.container.containerId}}</td>
+          </tr>
+          <tr>
+            <td>State</td>
+            <td>{{check-availability model.container.state}}</td>
+          </tr>
+          <tr>
+            <td>Created Time</td>
+            <td>{{check-availability model.container.createdDate}}</td>
+          </tr>
+          <tr>
+            <td>Started Time</td>
+            <td>{{check-availability model.container.startedDate}}</td>
+          </tr>
+          <tr>
+            <td>Host</td>
+            <td>{{check-availability model.container.host}}</td>
+          </tr>
+          <tr>
+            <td>Host URL</td>
+            <td>{{check-availability model.container.hostUrl}}</td>
+          </tr>
+          <tr>
+            <td>Node</td>
+            <td>{{check-availability model.container.node}}</td>
+          </tr>
+          <tr>
+            <td>IP Address</td>
+            <td>{{check-availability model.container.ip}}</td>
+          </tr>
+          <tr>
+            <td>Exit Status Code</td>
+            <td>{{check-availability model.container.exitStatusCode}}</td>
+          </tr>
+        </tbody>
+      </table>
+    </div>
+  </div>
+  {{else}}
+  <div class="panel panel-default">
+    <h4 class="text-center">No component information available!</h4>
+  </div>
+  {{/if}}
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs
new file mode 100644
index 0000000..e7ac57a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs
@@ -0,0 +1,46 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+{{breadcrumb-bar breadcrumbs=breadcrumbs}}
+
+<div class="col-md-12 container-fluid">
+  <div class="row">
+
+    <div class="col-md-2 container-fluid">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          Component
+        </div>
+        <div class="panel-body">
+          <ul class="nav nav-pills nav-stacked collapse in">
+            {{#link-to 'yarn-component-instances.info' tagName="li" class=(if (eq target.currentPath 'yarn-component-instances.info') "active")}}
+              {{#link-to 'yarn-component-instances.info' componentName (query-params service=serviceName appid=appId)}}Information{{/link-to}}
+            {{/link-to}}
+            {{#link-to 'yarn-component-instances.configs' tagName="li" class=(if (eq target.currentPath 'yarn-component-instances.configs') "active")}}
+              {{#link-to 'yarn-component-instances.configs' componentName (query-params service=serviceName appid=appId)}}Configurations{{/link-to}}
+            {{/link-to}}
+          </ul>
+        </div>
+      </div>
+    </div>
+
+    <div class="col-md-10 container-fluid">
+      {{outlet}}
+    </div>
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs
new file mode 100644
index 0000000..85b6b42
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs
@@ -0,0 +1,53 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  {{#if model.configs}}
+    <div class="col-md-12">
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          <div class="panel-title">Component Configurations</div>
+        </div>
+        <div class="">
+          <table class="table table-hover table-custom-bordered table-custom-stripped table-radius-none table-border-none">
+            <thead>
+              <tr>
+                <th>Name</th>
+                <th>Value</th>
+              </tr>
+            </thead>
+            <tbody>
+              {{#each model.configs as |config|}}
+                <tr>
+                  <td>{{config.name}}</td>
+                  <td>{{config.value}}</td>
+                </tr>
+              {{/each}}
+            </tbody>
+          </table>
+        </div>
+      </div>
+    </div>
+  {{else}}
+    <div class="col-md-12">
+      <div class="panel panel-default">
+        <h4 class="text-center">No component configurations available!</h4>
+      </div>
+    </div>
+  {{/if}}
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs
new file mode 100644
index 0000000..0b642b0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs
@@ -0,0 +1,28 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="row">
+  <div class="col-md-12">
+    <h3>Active Components: {{model.componentName}}</h3>
+    {{em-table columns=tableColumns rows=model.instances}}
+  </div>
+</div>
+
+<div class="col-md-12">
+  {{metrics-table metrics=model.metrics type="Component"}}
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs
new file mode 100644
index 0000000..a95af2b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs
@@ -0,0 +1,23 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="col-md-12 container-fluid">
+  <div class="loading-mask">
+    <img src="assets/images/spinner.gif" alt="Loading...">
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
index 5785d1d..dbec4ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
@@ -26,9 +26,9 @@ module.exports = { // Yarn UI App configurations
     },
     namespaces: {
       timeline: 'ws/v1/applicationhistory',
+      timelineService: 'ws/v2/timeline/apps',
       cluster: 'ws/v1/cluster',
       metrics: 'ws/v1/cluster/metrics',
-      timelineService: 'ws/v2/timeline/apps',
       timelineV2: 'ws/v2/timeline',
       dashService: 'services/v1/applications',
       node: '{nodeAddress}/ws/v1/node'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js
new file mode 100644
index 0000000..fedf00b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('confirm-dialog', 'Integration | Component | confirm dialog', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{confirm-dialog}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#confirm-dialog}}
+      template block text
+    {{/confirm-dialog}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js
new file mode 100644
index 0000000..f6f9ef0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('metrics-table', 'Integration | Component | metrics table', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{metrics-table}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#metrics-table}}
+      template block text
+    {{/metrics-table}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js
new file mode 100644
index 0000000..f1eaba6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('adapter:yarn-component-instance', 'Unit | Adapter | yarn component instance', {
+  // Specify the other units that are required for this test.
+  // needs: ['serializer:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let adapter = this.subject();
+  assert.ok(adapter);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js
new file mode 100644
index 0000000..15b862b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('adapter:yarn-service-component', 'Unit | Adapter | yarn service component', {
+  // Specify the other units that are required for this test.
+  // needs: ['serializer:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let adapter = this.subject();
+  assert.ok(adapter);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js
new file mode 100644
index 0000000..4ab8680
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('adapter:yarn-service-info', 'Unit | Adapter | yarn service info', {
+  // Specify the other units that are required for this test.
+  // needs: ['serializer:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let adapter = this.subject();
+  assert.ok(adapter);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js
new file mode 100644
index 0000000..4ef38ff
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-app/components', 'Unit | Controller | yarn app/components', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js
new file mode 100644
index 0000000..0b26cde
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-app/configs', 'Unit | Controller | yarn app/configs', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js
new file mode 100644
index 0000000..c8f29b9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-component-instance', 'Unit | Controller | yarn component instance', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js
new file mode 100644
index 0000000..2abbe9f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-component-instance/info', 'Unit | Controller | yarn component instance/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js
new file mode 100644
index 0000000..8622c71
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-component-instances', 'Unit | Controller | yarn component instances', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js
new file mode 100644
index 0000000..63a6836
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-component-instances/configs', 'Unit | Controller | yarn component instances/configs', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js
new file mode 100644
index 0000000..328679a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('controller:yarn-component-instances/info', 'Unit | Controller | yarn component instances/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+// Replace this with your real tests.
+test('it exists', function(assert) {
+  let controller = this.subject();
+  assert.ok(controller);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js
new file mode 100644
index 0000000..2a9db72
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { checkAvailability } from '../../../helpers/check-availability';
+import { module, test } from 'qunit';
+
+module('Unit | Helper | check availability');
+
+// Replace this with your real tests.
+test('it works', function(assert) {
+  let result = checkAvailability(42);
+  assert.ok(result);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js
new file mode 100644
index 0000000..0c79c39
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-component-instance', 'Unit | Model | yarn component instance', {
+  // Specify the other units that are required for this test.
+  needs: []
+});
+
+test('it exists', function(assert) {
+  let model = this.subject();
+  // let store = this.store();
+  assert.ok(!!model);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js
new file mode 100644
index 0000000..d7476a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-service-component', 'Unit | Model | yarn service component', {
+  // Specify the other units that are required for this test.
+  needs: []
+});
+
+test('it exists', function(assert) {
+  let model = this.subject();
+  // let store = this.store();
+  assert.ok(!!model);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js
new file mode 100644
index 0000000..114be00
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-service-info', 'Unit | Model | yarn service info', {
+  // Specify the other units that are required for this test.
+  needs: []
+});
+
+test('it exists', function(assert) {
+  let model = this.subject();
+  // let store = this.store();
+  assert.ok(!!model);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js
new file mode 100644
index 0000000..1dd8909
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-app/components', 'Unit | Route | yarn app/components', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js
new file mode 100644
index 0000000..7b90712
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-app/configs', 'Unit | Route | yarn app/configs', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js
new file mode 100644
index 0000000..b9ab2f0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-component-instance', 'Unit | Route | yarn component instance', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js
new file mode 100644
index 0000000..a2784ce
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-component-instance/info', 'Unit | Route | yarn component instance/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js
new file mode 100644
index 0000000..6aee99a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-component-instances', 'Unit | Route | yarn component instances', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js
new file mode 100644
index 0000000..281aabb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-component-instances/configs', 'Unit | Route | yarn component instances/configs', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js
new file mode 100644
index 0000000..23d9bb8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleFor, test } from 'ember-qunit';
+
+moduleFor('route:yarn-component-instances/info', 'Unit | Route | yarn component instances/info', {
+  // Specify the other units that are required for this test.
+  // needs: ['controller:foo']
+});
+
+test('it exists', function(assert) {
+  let route = this.subject();
+  assert.ok(route);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js
new file mode 100644
index 0000000..6add066
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-component-instance', 'Unit | Serializer | yarn component instance', {
+  // Specify the other units that are required for this test.
+  needs: ['serializer:yarn-component-instance']
+});
+
+// Replace this with your real tests.
+test('it serializes records', function(assert) {
+  let record = this.subject();
+
+  let serializedRecord = record.serialize();
+
+  assert.ok(serializedRecord);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js
new file mode 100644
index 0000000..c9df24b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-service-component', 'Unit | Serializer | yarn service component', {
+  // Specify the other units that are required for this test.
+  needs: ['serializer:yarn-service-component']
+});
+
+// Replace this with your real tests.
+test('it serializes records', function(assert) {
+  let record = this.subject();
+
+  let serializedRecord = record.serialize();
+
+  assert.ok(serializedRecord);
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b6c2b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js
new file mode 100644
index 0000000..9b1d310
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForModel, test } from 'ember-qunit';
+
+moduleForModel('yarn-service-info', 'Unit | Serializer | yarn service info', {
+  // Specify the other units that are required for this test.
+  needs: ['serializer:yarn-service-info']
+});
+
+// Replace this with your real tests.
+test('it serializes records', function(assert) {
+  let record = this.subject();
+
+  let serializedRecord = record.serialize();
+
+  assert.ok(serializedRecord);
+});


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/50] [abbrv] hadoop git commit: YARN-6255. Refactor yarn-native-services framework. Contributed by Jian He

Posted by ji...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0536f18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 00e2b62..f4ea70b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -20,7 +20,6 @@ package org.apache.slider.client;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.io.Files;
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
@@ -35,7 +34,6 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
@@ -55,40 +53,44 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.YarnClientApplication;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.SliderApplicationApi;
 import org.apache.slider.api.SliderClusterProtocol;
-import org.apache.slider.api.StateValues;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.api.types.SliderInstanceDescription;
 import org.apache.slider.client.ipc.SliderApplicationIpcClient;
 import org.apache.slider.client.ipc.SliderClusterOperations;
 import org.apache.slider.common.Constants;
 import org.apache.slider.common.SliderExitCodes;
 import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.common.params.AbstractActionArgs;
 import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
 import org.apache.slider.common.params.ActionAMSuicideArgs;
 import org.apache.slider.common.params.ActionClientArgs;
 import org.apache.slider.common.params.ActionCreateArgs;
 import org.apache.slider.common.params.ActionDependencyArgs;
-import org.apache.slider.common.params.ActionDestroyArgs;
 import org.apache.slider.common.params.ActionDiagnosticArgs;
 import org.apache.slider.common.params.ActionEchoArgs;
 import org.apache.slider.common.params.ActionExistsArgs;
@@ -113,20 +115,13 @@ import org.apache.slider.common.params.ActionUpgradeArgs;
 import org.apache.slider.common.params.Arguments;
 import org.apache.slider.common.params.ClientArgs;
 import org.apache.slider.common.params.CommonArgs;
-import org.apache.slider.common.params.LaunchArgsAccessor;
 import org.apache.slider.common.tools.ConfigHelper;
-import org.apache.slider.common.tools.Duration;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.buildutils.InstanceBuilder;
 import org.apache.slider.core.buildutils.InstanceIO;
 import org.apache.slider.core.conf.AggregateConf;
 import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.conf.ResourcesInputPropertiesValidator;
-import org.apache.slider.core.conf.TemplateInputPropertiesValidator;
 import org.apache.slider.core.exceptions.BadClusterStateException;
 import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.BadConfigException;
@@ -137,18 +132,13 @@ import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
 import org.apache.slider.core.exceptions.UsageException;
 import org.apache.slider.core.exceptions.WaitTimeoutException;
-import org.apache.slider.core.launch.AppMasterLauncher;
 import org.apache.slider.core.launch.ClasspathConstructor;
 import org.apache.slider.core.launch.CredentialUtils;
 import org.apache.slider.core.launch.JavaCommandLineBuilder;
-import org.apache.slider.core.launch.LaunchedApplication;
 import org.apache.slider.core.launch.SerializedApplicationReport;
 import org.apache.slider.core.main.RunService;
-import org.apache.slider.core.persist.AppDefinitionPersister;
 import org.apache.slider.core.persist.ApplicationReportSerDeser;
-import org.apache.slider.core.persist.ConfPersister;
 import org.apache.slider.core.persist.JsonSerDeser;
-import org.apache.slider.core.persist.LockAcquireFailedException;
 import org.apache.slider.core.registry.SliderRegistryUtils;
 import org.apache.slider.core.registry.YarnAppListClient;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
@@ -160,19 +150,19 @@ import org.apache.slider.core.registry.docstore.PublishedExportsSet;
 import org.apache.slider.core.registry.retrieve.RegistryRetriever;
 import org.apache.slider.core.zk.BlockingZKWatcher;
 import org.apache.slider.core.zk.ZKIntegration;
-import org.apache.slider.core.zk.ZKPathBuilder;
 import org.apache.slider.providers.AbstractClientProvider;
+import org.apache.slider.providers.ProviderUtils;
 import org.apache.slider.providers.SliderProviderFactory;
 import org.apache.slider.providers.agent.AgentKeys;
-import org.apache.slider.providers.docker.DockerClientProvider;
-import org.apache.slider.providers.slideram.SliderAMClientProvider;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
 import org.apache.slider.server.services.utility.AbstractSliderLaunchedService;
+import org.apache.slider.util.ServiceApiUtil;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.slf4j.Logger;
@@ -182,7 +172,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.Console;
 import java.io.File;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InterruptedIOException;
@@ -191,10 +180,7 @@ import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.io.Writer;
-import java.net.InetSocketAddress;
-import java.net.URISyntaxException;
 import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -213,14 +199,11 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import static org.apache.hadoop.registry.client.binding.RegistryUtils.*;
-import static org.apache.slider.api.InternalKeys.*;
-import static org.apache.slider.api.OptionKeys.*;
-import static org.apache.slider.api.ResourceKeys.*;
+import static org.apache.slider.api.InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH;
 import static org.apache.slider.common.Constants.HADOOP_JAAS_DEBUG;
 import static org.apache.slider.common.params.SliderActions.*;
 import static org.apache.slider.common.tools.SliderUtils.*;
 
-
 /**
  * Client service for Slider
  */
@@ -246,6 +229,9 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   public static final String E_NO_RESOURCE_MANAGER = "No valid Resource Manager address provided";
   public static final String E_PACKAGE_EXISTS = "Package exists";
   private static PrintStream clientOutputStream = System.out;
+  private static final JsonSerDeser<Application> jsonSerDeser =
+      new JsonSerDeser<Application>(Application.class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
 
   // value should not be changed without updating string find in slider.py
   private static final String PASSWORD_PROMPT = "Enter password for";
@@ -362,16 +348,22 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
             serviceArgs.getActionAMSuicideArgs());
         break;
       
-      case ACTION_BUILD:
-        exitCode = actionBuild(clusterName, serviceArgs.getActionBuildArgs());
-        break;
-      
       case ACTION_CLIENT:
         exitCode = actionClient(serviceArgs.getActionClientArgs());
         break;
 
       case ACTION_CREATE:
-        exitCode = actionCreate(clusterName, serviceArgs.getActionCreateArgs());
+        ActionCreateArgs args = serviceArgs.getActionCreateArgs();
+        File file = args.getAppDef();
+        Path filePath = new Path(file.getAbsolutePath());
+        log.info("Loading app definition from: " + filePath);
+        Application application =
+            jsonSerDeser.load(FileSystem.getLocal(getConfig()), filePath);
+        if(args.lifetime > 0) {
+          application.setLifetime(args.lifetime);
+        }
+        application.setName(clusterName);
+        actionCreate(application);
         break;
 
       case ACTION_DEPENDENCY:
@@ -379,7 +371,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         break;
 
       case ACTION_DESTROY:
-        exitCode = actionDestroy(clusterName, serviceArgs.getActionDestroyArgs());
+        actionDestroy(clusterName);
         break;
 
       case ACTION_DIAGNOSTICS:
@@ -392,11 +384,11 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         break;
       
       case ACTION_FLEX:
-        exitCode = actionFlex(clusterName, serviceArgs.getActionFlexArgs());
+        actionFlex(clusterName, serviceArgs.getActionFlexArgs());
         break;
       
-      case ACTION_FREEZE:
-        exitCode = actionFreeze(clusterName, serviceArgs.getActionFreezeArgs());
+      case ACTION_STOP:
+        actionStop(clusterName, serviceArgs.getActionFreezeArgs());
         break;
       
       case ACTION_HELP:
@@ -456,8 +448,8 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         exitCode = actionStatus(clusterName, serviceArgs.getActionStatusArgs());
         break;
 
-      case ACTION_THAW:
-        exitCode = actionThaw(clusterName, serviceArgs.getActionThawArgs());
+      case ACTION_START:
+        exitCode = actionStart(clusterName, serviceArgs.getActionThawArgs());
         break;
 
       case ACTION_TOKENS:
@@ -516,7 +508,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     String zkPath = ZKIntegration.mkClusterPath(user, clusterName);
     Exception e = null;
     try {
-      Configuration config = getConfig();
       ZKIntegration client = getZkClient(clusterName, user);
       if (client != null) {
         if (client.exists(zkPath)) {
@@ -627,76 +618,31 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
    * force=true by default.
    */
   @Override
-  public int actionDestroy(String clustername) throws YarnException,
-                                                      IOException {
-    ActionDestroyArgs destroyArgs = new ActionDestroyArgs();
-    destroyArgs.force = true;
-    return actionDestroy(clustername, destroyArgs);
-  }
-
-  @Override
-  public int actionDestroy(String clustername,
-      ActionDestroyArgs destroyArgs) throws YarnException, IOException {
-    // verify that a live cluster isn't there
-    validateClusterName(clustername);
-    //no=op, it is now mandatory. 
-    verifyBindingsDefined();
-    verifyNoLiveClusters(clustername, "Destroy");
-    boolean forceDestroy = destroyArgs.force;
-    log.debug("actionDestroy({}, force={})", clustername, forceDestroy);
-
-    // create the directory path
-    Path clusterDirectory = sliderFileSystem.buildClusterDirPath(clustername);
-    // delete the directory;
+  public void actionDestroy(String appName)
+      throws YarnException, IOException {
+    validateClusterName(appName);
+    Path appDir = sliderFileSystem.buildClusterDirPath(appName);
     FileSystem fs = sliderFileSystem.getFileSystem();
-    boolean exists = fs.exists(clusterDirectory);
-    if (exists) {
-      log.debug("Application Instance {} found at {}: destroying", clustername, clusterDirectory);
-      if (!forceDestroy) {
-        // fail the command if --force is not explicitly specified
-        throw new UsageException("Destroy will permanently delete directories and registries. "
-            + "Reissue this command with the --force option if you want to proceed.");
-      }
-      if (!fs.delete(clusterDirectory, true)) {
-        log.warn("Filesystem returned false from delete() operation");
-      }
-
-      if(!deleteZookeeperNode(clustername)) {
-        log.warn("Unable to perform node cleanup in Zookeeper.");
-      }
-
-      if (fs.exists(clusterDirectory)) {
-        log.warn("Failed to delete {}", clusterDirectory);
+    if (fs.exists(appDir)) {
+      if (fs.delete(appDir, true)) {
+        log.info("Successfully deleted application + " + appName);
+        return;
+      } else {
+        String message =
+            "Failed to delete application + " + appName + " at:  " + appDir;
+        log.info(message);
+        throw new YarnException(message);
       }
-
-    } else {
-      log.debug("Application Instance {} already destroyed", clustername);
-    }
-
-    // rm the registry entry —do not let this block the destroy operations
-    String registryPath = SliderRegistryUtils.registryPathForInstance(
-        clustername);
-    try {
-      getRegistryOperations().delete(registryPath, true);
-    } catch (IOException e) {
-      log.warn("Error deleting registry entry {}: {} ", registryPath, e, e);
-    } catch (SliderException e) {
-      log.warn("Error binding to registry {} ", e, e);
     }
-
-    List<ApplicationReport> instances = findAllLiveInstances(clustername);
-    // detect any race leading to cluster creation during the check/destroy process
-    // and report a problem.
-    if (!instances.isEmpty()) {
-      throw new SliderException(EXIT_APPLICATION_IN_USE,
-                              clustername + ": "
-                              + E_DESTROY_CREATE_RACE_CONDITION
-                              + " :" +
-                              instances.get(0));
+    if (!deleteZookeeperNode(appName)) {
+      String message =
+          "Failed to cleanup cleanup application " + appName + " in zookeeper";
+      log.warn(message);
+      throw new YarnException(message);
     }
-    log.info("Destroyed cluster {}", clustername);
-    return EXIT_SUCCESS;
+    //TODO clean registry
   }
+
   
   @Override
   public int actionAmSuicide(String clustername,
@@ -715,203 +661,285 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return factory.createClientProvider();
   }
 
-  /**
-   * Create the cluster -saving the arguments to a specification file first
-   * @param clustername cluster name
-   * @return the status code
-   * @throws YarnException Yarn problems
-   * @throws IOException other problems
-   * @throws BadCommandArgumentsException bad arguments.
-   */
-  public int actionCreate(String clustername, ActionCreateArgs createArgs) throws
-                                               YarnException,
-                                               IOException {
-
-    actionBuild(clustername, createArgs);
-    Path clusterDirectory = sliderFileSystem.buildClusterDirPath(clustername);
-    AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
-        clustername, clusterDirectory);
-    try {
-      checkForCredentials(getConfig(), instanceDefinition.getAppConf(),
-          clustername);
-    } catch (IOException e) {
-      sliderFileSystem.getFileSystem().delete(clusterDirectory, true);
-      throw e;
-    }
-    return startCluster(clustername, createArgs, createArgs.lifetime);
-  }
-
-  @Override
-  public int actionUpgrade(String clustername, ActionUpgradeArgs upgradeArgs)
-      throws YarnException, IOException {
-    File template = upgradeArgs.template;
-    File resources = upgradeArgs.resources;
-    List<String> containers = upgradeArgs.containers;
-    List<String> components = upgradeArgs.components;
-
-    // For upgrade spec, let's be little more strict with validation. If either
-    // --template or --resources is specified, then both needs to be specified.
-    // Otherwise the internal app config and resources states of the app will be
-    // unwantedly modified and the change will take effect to the running app
-    // immediately.
-    require(!(template != null && resources == null),
-          "Option %s must be specified with option %s",
-          Arguments.ARG_RESOURCES, Arguments.ARG_TEMPLATE);
-
-    require(!(resources != null && template == null),
-          "Option %s must be specified with option %s",
-          Arguments.ARG_TEMPLATE, Arguments.ARG_RESOURCES);
-
-    // For upgrade spec, both --template and --resources should be specified
-    // and neither of --containers or --components should be used
-    if (template != null && resources != null) {
-      require(CollectionUtils.isEmpty(containers),
-            "Option %s cannot be specified with %s or %s",
-            Arguments.ARG_CONTAINERS, Arguments.ARG_TEMPLATE,
-            Arguments.ARG_RESOURCES);
-      require(CollectionUtils.isEmpty(components),
-              "Option %s cannot be specified with %s or %s",
-              Arguments.ARG_COMPONENTS, Arguments.ARG_TEMPLATE,
-              Arguments.ARG_RESOURCES);
-
-      // not an error to try to upgrade a stopped cluster, just return success
-      // code, appropriate log messages have already been dumped
-      if (!isAppInRunningState(clustername)) {
-        return EXIT_SUCCESS;
-      }
 
-      // Now initiate the upgrade spec flow
-      buildInstanceDefinition(clustername, upgradeArgs, true, true, true);
-      SliderClusterOperations clusterOperations = createClusterOperations(clustername);
-      clusterOperations.amSuicide("AM restarted for application upgrade", 1, 1000);
-      return EXIT_SUCCESS;
-    }
+  public ApplicationId actionCreate(Application application)
+      throws IOException, YarnException {
+    ServiceApiUtil.validateApplicationPostPayload(application);
+    String appName = application.getName();
+    validateClusterName(appName);
+    verifyNoLiveApp(appName, "Create");
+    Path appDir = checkAppNotExistOnHdfs(application);
 
-    // Since neither --template or --resources were specified, it is upgrade
-    // containers flow. Here any one or both of --containers and --components
-    // can be specified. If a container is specified with --containers option
-    // and also belongs to a component type specified with --components, it will
-    // be upgraded only once.
-    return actionUpgradeContainers(clustername, upgradeArgs);
+    ApplicationId appId = submitApp(application);
+    application.setId(appId.toString());
+    // write app definition on to hdfs
+    persistApp(appDir, application);
+    return appId;
+    //TODO deal with registry
   }
 
-  private int actionUpgradeContainers(String clustername,
-      ActionUpgradeArgs upgradeArgs) throws YarnException, IOException {
-    verifyBindingsDefined();
-    validateClusterName(clustername);
-    int waittime = upgradeArgs.getWaittime(); // ignored for now
-    String text = "Upgrade containers";
-    log.debug("actionUpgradeContainers({}, reason={}, wait={})", clustername,
-        text, waittime);
-
-    // not an error to try to upgrade a stopped cluster, just return success
-    // code, appropriate log messages have already been dumped
-    if (!isAppInRunningState(clustername)) {
-      return EXIT_SUCCESS;
-    }
-
-    // Create sets of containers and components to get rid of duplicates and
-    // for quick lookup during checks below
-    Set<String> containers = new HashSet<>();
-    if (upgradeArgs.containers != null) {
-      containers.addAll(new ArrayList<>(upgradeArgs.containers));
-    }
-    Set<String> components = new HashSet<>();
-    if (upgradeArgs.components != null) {
-      components.addAll(new ArrayList<>(upgradeArgs.components));
-    }
-
-    // check validity of component names and running containers here
-    List<ContainerInformation> liveContainers = getContainers(clustername);
-    Set<String> validContainers = new HashSet<>();
-    Set<String> validComponents = new HashSet<>();
-    for (ContainerInformation liveContainer : liveContainers) {
-      boolean allContainersAndComponentsAccountedFor = true;
-      if (CollectionUtils.isNotEmpty(containers)) {
-        if (containers.contains(liveContainer.containerId)) {
-          containers.remove(liveContainer.containerId);
-          validContainers.add(liveContainer.containerId);
-        }
-        allContainersAndComponentsAccountedFor = false;
-      }
-      if (CollectionUtils.isNotEmpty(components)) {
-        if (components.contains(liveContainer.component)) {
-          components.remove(liveContainer.component);
-          validComponents.add(liveContainer.component);
-        }
-        allContainersAndComponentsAccountedFor = false;
-      }
-      if (allContainersAndComponentsAccountedFor) {
-        break;
+  private ApplicationId submitApp(Application app)
+      throws IOException, YarnException {
+    String appName = app.getName();
+    Configuration conf = getConfig();
+    Path appRootDir = sliderFileSystem.buildClusterDirPath(app.getName());
+    deployedClusterName = appName;
+
+    YarnClientApplication yarnApp =  yarnClient.createApplication();
+    ApplicationSubmissionContext submissionContext =
+        yarnApp.getApplicationSubmissionContext();
+    applicationId = submissionContext.getApplicationId();
+    submissionContext.setKeepContainersAcrossApplicationAttempts(true);
+    if (app.getLifetime() > 0) {
+      Map<ApplicationTimeoutType, Long> appTimeout = new HashMap<>();
+      appTimeout.put(ApplicationTimeoutType.LIFETIME, app.getLifetime());
+      submissionContext.setApplicationTimeouts(appTimeout);
+    }
+    submissionContext.setMaxAppAttempts(conf.getInt(KEY_AM_RESTART_LIMIT, 2));
+
+    Map<String, LocalResource> localResources =
+        new HashMap<String, LocalResource>();
+
+    // copy local slideram-log4j.properties to hdfs and add to localResources
+    boolean hasSliderAMLog4j =
+        addAMLog4jResource(appName, conf, localResources);
+    // copy jars to hdfs and add to localResources
+    Path tempPath = addJarResource(appName, localResources);
+    // add keytab if in secure env
+    addKeytabResourceIfSecure(sliderFileSystem, localResources, conf, appName);
+    printLocalResources(localResources);
+
+    //TODO SliderAMClientProvider#copyEnvVars
+    //TODO localResource putEnv
+
+    Map<String, String> env = addAMEnv(conf, tempPath);
+
+    // create AM CLI
+    String cmdStr =
+        buildCommandLine(appName, conf, appRootDir, hasSliderAMLog4j);
+
+    //TODO set log aggregation context
+    //TODO set retry window
+    submissionContext.setResource(Resource.newInstance(
+        conf.getLong(KEY_AM_RESOURCE_MEM, DEFAULT_KEY_AM_RESOURCE_MEM), 1));
+    submissionContext.setQueue(conf.get(KEY_YARN_QUEUE, DEFAULT_YARN_QUEUE));
+    submissionContext.setApplicationName(appName);
+    submissionContext.setApplicationType(SliderKeys.APP_TYPE);
+    Set<String> appTags =
+        AbstractClientProvider.createApplicationTags(appName, null, null);
+    if (!appTags.isEmpty()) {
+      submissionContext.setApplicationTags(appTags);
+    }
+    ContainerLaunchContext amLaunchContext =
+        Records.newRecord(ContainerLaunchContext.class);
+    amLaunchContext.setCommands(Collections.singletonList(cmdStr));
+    amLaunchContext.setEnvironment(env);
+    amLaunchContext.setLocalResources(localResources);
+    addCredentialsIfSecure(conf, amLaunchContext);
+    submissionContext.setAMContainerSpec(amLaunchContext);
+    yarnClient.submitApplication(submissionContext);
+    return submissionContext.getApplicationId();
+  }
+
+  private void printLocalResources(Map<String, LocalResource> map) {
+    log.info("Added LocalResource for localization: ");
+    StringBuilder builder = new StringBuilder();
+    for (Map.Entry<String, LocalResource> entry : map.entrySet()) {
+      builder.append(entry.getKey()).append(" -> ")
+          .append(entry.getValue().getResource().getFile())
+          .append(System.lineSeparator());
+    }
+    log.info(builder.toString());
+  }
+
+  private void addCredentialsIfSecure(Configuration conf,
+      ContainerLaunchContext amLaunchContext) throws IOException {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      // pick up oozie credentials
+      Credentials credentials =
+          CredentialUtils.loadTokensFromEnvironment(System.getenv(), conf);
+      if (credentials == null) {
+        // nothing from oozie, so build up directly
+        credentials = new Credentials(
+            UserGroupInformation.getCurrentUser().getCredentials());
+        CredentialUtils.addRMRenewableFSDelegationTokens(conf,
+            sliderFileSystem.getFileSystem(), credentials);
+      } else {
+        log.info("Using externally supplied credentials to launch AM");
       }
+      amLaunchContext.setTokens(CredentialUtils.marshallCredentials(credentials));
     }
+  }
 
-    // If any item remains in containers or components then they are invalid.
-    // Log warning for them and proceed.
-    if (CollectionUtils.isNotEmpty(containers)) {
-      log.warn("Invalid set of containers provided {}", containers);
-    }
-    if (CollectionUtils.isNotEmpty(components)) {
-      log.warn("Invalid set of components provided {}", components);
+  private String buildCommandLine(String appName, Configuration conf,
+      Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
+    JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
+    CLI.forceIPv4().headless();
+    //TODO CLI.setJVMHeap
+    //TODO CLI.addJVMOPTS
+    if (hasSliderAMLog4j) {
+      CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, LOG4J_SERVER_PROP_FILENAME);
+      CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
     }
-
-    // If not a single valid container or component is specified do not proceed
-    if (CollectionUtils.isEmpty(validContainers)
-        && CollectionUtils.isEmpty(validComponents)) {
-      log.error("Not a single valid container or component specified. Nothing to do.");
-      return EXIT_NOT_FOUND;
+    CLI.add(SliderAppMaster.SERVICE_CLASSNAME);
+    CLI.add(ACTION_CREATE, appName);
+    //TODO debugAM CLI.add(Arguments.ARG_DEBUG)
+    CLI.add(Arguments.ARG_CLUSTER_URI, appRootDir.toUri());
+//    InetSocketAddress rmSchedulerAddress = getRmSchedulerAddress(conf);
+//    String rmAddr = NetUtils.getHostPortString(rmSchedulerAddress);
+//    CLI.add(Arguments.ARG_RM_ADDR, rmAddr);
+    // pass the registry binding
+    CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT,
+        RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
+    CLI.addMandatoryConfOption(conf, RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
+    if(isHadoopClusterSecure(conf)) {
+      //TODO Is this required ??
+      // if the cluster is secure, make sure that
+      // the relevant security settings go over
+      CLI.addConfOption(conf, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
     }
+//    // copy over any/all YARN RM client values, in case the server-side XML conf file
+//    // has the 0.0.0.0 address
+//    CLI.addConfOptions(conf, YarnConfiguration.RM_ADDRESS,
+//        YarnConfiguration.RM_CLUSTER_ID, YarnConfiguration.RM_HOSTNAME,
+//        YarnConfiguration.RM_PRINCIPAL);
 
-    SliderClusterProtocol appMaster = connect(findInstance(clustername));
-    Messages.UpgradeContainersRequestProto r =
-      Messages.UpgradeContainersRequestProto
-              .newBuilder()
-              .setMessage(text)
-              .addAllContainer(validContainers)
-              .addAllComponent(validComponents)
-              .build();
-    appMaster.upgradeContainers(r);
-    log.info("Cluster upgrade issued for -");
-    if (CollectionUtils.isNotEmpty(validContainers)) {
-      log.info(" Containers (total {}): {}", validContainers.size(),
-          validContainers);
-    }
-    if (CollectionUtils.isNotEmpty(validComponents)) {
-      log.info(" Components (total {}): {}", validComponents.size(),
-          validComponents);
-    }
+    // write out the path output
+    CLI.addOutAndErrFiles(STDOUT_AM, STDERR_AM);
+    String cmdStr = CLI.build();
+    log.info("Completed setting up app master command: {}", cmdStr);
+    return cmdStr;
+  }
 
-    return EXIT_SUCCESS;
+  private Map<String, String> addAMEnv(Configuration conf, Path tempPath)
+      throws IOException {
+    Map<String, String> env = new HashMap<String, String>();
+    ClasspathConstructor classpath =
+        buildClasspath(SliderKeys.SUBMITTED_CONF_DIR, "lib",
+            sliderFileSystem, getUsingMiniMRCluster());
+    env.put("CLASSPATH", classpath.buildClasspath());
+    env.put("LANG", "en_US.UTF-8");
+    env.put("LC_ALL", "en_US.UTF-8");
+    env.put("LANGUAGE", "en_US.UTF-8");
+    String jaas = System.getenv(HADOOP_JAAS_DEBUG);
+    if (jaas != null) {
+      env.put(HADOOP_JAAS_DEBUG, jaas);
+    }
+    env.putAll(getAmLaunchEnv(conf));
+    log.info("AM env: \n{}", stringifyMap(env));
+    return env;
+  }
+
+  private Path addJarResource(String appName,
+      Map<String, LocalResource> localResources)
+      throws IOException, SliderException {
+    Path libPath = sliderFileSystem.buildClusterDirPath(appName);
+    ProviderUtils
+        .addProviderJar(localResources, SliderAppMaster.class, SLIDER_JAR,
+            sliderFileSystem, libPath, "lib", false);
+    Path dependencyLibTarGzip = sliderFileSystem.getDependencyTarGzip();
+    if (sliderFileSystem.isFile(dependencyLibTarGzip)) {
+      log.info("Loading lib tar from " + sliderFileSystem.getFileSystem()
+          .getScheme() + ": "  + dependencyLibTarGzip);
+      SliderUtils.putAmTarGzipAndUpdate(localResources, sliderFileSystem);
+    } else {
+      String[] libs = SliderUtils.getLibDirs();
+      log.info("Loading dependencies from local file system: " + Arrays
+          .toString(libs));
+      for (String libDirProp : libs) {
+        ProviderUtils
+            .addAllDependencyJars(localResources, sliderFileSystem, libPath,
+                "lib", libDirProp);
+      }
+    }
+    return libPath;
+  }
+
+  private boolean addAMLog4jResource(String appName, Configuration conf,
+      Map<String, LocalResource> localResources)
+      throws IOException, BadClusterStateException {
+    boolean hasSliderAMLog4j = false;
+    String hadoopConfDir =
+        System.getenv(ApplicationConstants.Environment.HADOOP_CONF_DIR.name());
+    if (hadoopConfDir != null) {
+      File localFile =
+          new File(hadoopConfDir, SliderKeys.LOG4J_SERVER_PROP_FILENAME);
+      if (localFile.exists()) {
+        Path localFilePath = createLocalPath(localFile);
+        Path appDirPath = sliderFileSystem.buildClusterDirPath(appName);
+        Path remoteConfPath =
+            new Path(appDirPath, SliderKeys.SUBMITTED_CONF_DIR);
+        Path remoteFilePath =
+            new Path(remoteConfPath, SliderKeys.LOG4J_SERVER_PROP_FILENAME);
+        copy(conf, localFilePath, remoteFilePath);
+        LocalResource localResource = sliderFileSystem
+            .createAmResource(remoteConfPath, LocalResourceType.FILE);
+        localResources.put(localFilePath.getName(), localResource);
+        hasSliderAMLog4j = true;
+      }
+    }
+    return hasSliderAMLog4j;
+  }
+
+  private Path checkAppNotExistOnHdfs(Application application)
+      throws IOException, SliderException {
+    Path appDir = sliderFileSystem.buildClusterDirPath(application.getName());
+    sliderFileSystem.verifyDirectoryNonexistent(
+        new Path(appDir, application.getName() + ".json"));
+    return appDir;
   }
 
-  // returns true if and only if app is in RUNNING state
-  private boolean isAppInRunningState(String clustername) throws YarnException,
-      IOException {
-    // is this actually a known cluster?
-    sliderFileSystem.locateInstanceDefinition(clustername);
-    ApplicationReport app = findInstance(clustername);
-    if (app == null) {
-      // exit early
-      log.info("Cluster {} not running", clustername);
-      return false;
-    }
-    log.debug("App to upgrade was found: {}:\n{}", clustername,
-        new OnDemandReportStringifier(app));
-    if (app.getYarnApplicationState().ordinal() >= YarnApplicationState.FINISHED.ordinal()) {
-      log.info("Cluster {} is in a terminated state {}. Use command '{}' instead.",
-          clustername, app.getYarnApplicationState(), ACTION_UPDATE);
-      return false;
+  private void persistApp(Path appDir, Application application)
+      throws IOException, SliderException {
+    FsPermission appDirPermission = new FsPermission("777");
+    sliderFileSystem.createWithPermissions(appDir, appDirPermission);
+    Path appJson = new Path(appDir, application.getName() + ".json");
+    jsonSerDeser
+        .save(sliderFileSystem.getFileSystem(), appJson, application, true);
+    log.info(
+        "Persisted application " + application.getName() + " at " + appJson);
+  }
+
+  private void addKeytabResourceIfSecure(SliderFileSystem fileSystem,
+      Map<String, LocalResource> localResource, Configuration conf,
+      String appName) throws IOException, BadConfigException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
     }
-
-    // IPC request to upgrade containers is possible if the app is running.
-    if (app.getYarnApplicationState().ordinal() < YarnApplicationState.RUNNING
-        .ordinal()) {
-      log.info("Cluster {} is in a pre-running state {}. To upgrade it needs "
-          + "to be RUNNING.", clustername, app.getYarnApplicationState());
-      return false;
+    String keytabPreInstalledOnHost =
+        conf.get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+    if (StringUtils.isEmpty(keytabPreInstalledOnHost)) {
+      String amKeytabName =
+          conf.get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      String keytabDir = conf.get(SliderXmlConfKeys.KEY_HDFS_KEYTAB_DIR);
+      Path keytabPath =
+          fileSystem.buildKeytabPath(keytabDir, amKeytabName, appName);
+      if (fileSystem.getFileSystem().exists(keytabPath)) {
+        LocalResource keytabRes =
+            fileSystem.createAmResource(keytabPath, LocalResourceType.FILE);
+        localResource
+            .put(SliderKeys.KEYTAB_DIR + "/" + amKeytabName, keytabRes);
+        log.info("Adding AM keytab on hdfs: " + keytabPath);
+      } else {
+        log.warn("No keytab file was found at {}.", keytabPath);
+        if (conf.getBoolean(KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) {
+          throw new BadConfigException("No keytab file was found at %s.",
+              keytabPath);
+        } else {
+          log.warn("The AM will be "
+              + "started without a kerberos authenticated identity. "
+              + "The application is therefore not guaranteed to remain "
+              + "operational beyond 24 hours.");
+        }
+      }
     }
+  }
 
-    return true;
+  @Override
+  public int actionUpgrade(String clustername, ActionUpgradeArgs upgradeArgs)
+      throws YarnException, IOException {
+  //TODO
+    return 0;
   }
 
   protected static void checkForCredentials(Configuration conf,
@@ -952,15 +980,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
-  private static char[] readOnePassword(String alias) throws IOException {
-    Console console = System.console();
-    if (console == null) {
-      throw new IOException("Unable to input password for " + alias +
-          " because System.console() is null");
-    }
-    return readPassword(alias, console);
-  }
-
   private static char[] readPassword(String alias, Console console)
       throws IOException {
     char[] cred = null;
@@ -987,16 +1006,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   @Override
-  public int actionBuild(String clustername,
-      AbstractClusterBuildingActionArgs buildInfo) throws
-                                               YarnException,
-                                               IOException {
-
-    buildInstanceDefinition(clustername, buildInfo, false, false);
-    return EXIT_SUCCESS; 
-  }
-
-  @Override
   public int actionKeytab(ActionKeytabArgs keytabInfo)
       throws YarnException, IOException {
     if (keytabInfo.install) {
@@ -1527,12 +1536,12 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     if (buildInfo.lifetime > 0) {
       updateLifetime(clustername, buildInfo.lifetime);
     } else {
-      buildInstanceDefinition(clustername, buildInfo, true, true);
+      //TODO upgrade
     }
     return EXIT_SUCCESS;
   }
 
-  public void updateLifetime(String appName, long lifetime)
+  public String updateLifetime(String appName, long lifetime)
       throws YarnException, IOException {
     EnumSet<YarnApplicationState> appStates = EnumSet.range(
         YarnApplicationState.NEW, YarnApplicationState.RUNNING);
@@ -1553,396 +1562,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     log.info("Successfully updated lifetime for an application: appName = "
         + appName + ", appId = " + appId
         + ". New expiry time in ISO8601 format is " + newTimeout);
-  }
-
-  /**
-   * Build up the AggregateConfiguration for an application instance then
-   * persists it
-   * @param clustername name of the cluster
-   * @param buildInfo the arguments needed to build the cluster
-   * @param overwrite true if existing cluster directory can be overwritten
-   * @param liveClusterAllowed true if live cluster can be modified
-   * @throws YarnException
-   * @throws IOException
-   */
-
-  public void buildInstanceDefinition(String clustername,
-      AbstractClusterBuildingActionArgs buildInfo, boolean overwrite,
-      boolean liveClusterAllowed) throws YarnException, IOException {
-    buildInstanceDefinition(clustername, buildInfo, overwrite,
-        liveClusterAllowed, false);
-  }
-
-  public void buildInstanceDefinition(String clustername,
-      AbstractClusterBuildingActionArgs buildInfo, boolean overwrite,
-      boolean liveClusterAllowed, boolean isUpgradeFlow) throws YarnException,
-      IOException {
-    // verify that a live cluster isn't there
-    validateClusterName(clustername);
-    verifyBindingsDefined();
-    if (!liveClusterAllowed) {
-      verifyNoLiveClusters(clustername, "Create");
-    }
-
-    Configuration conf = getConfig();
-    String registryQuorum = lookupZKQuorum();
-
-    Path appconfdir = buildInfo.getConfdir();
-    // Provider
-    String providerName = buildInfo.getProvider();
-    requireArgumentSet(Arguments.ARG_PROVIDER, providerName);
-    log.debug("Provider is {}", providerName);
-    SliderAMClientProvider sliderAM = new SliderAMClientProvider(conf);
-    AbstractClientProvider provider =
-      createClientProvider(providerName);
-    InstanceBuilder builder =
-      new InstanceBuilder(sliderFileSystem, 
-                          getConfig(),
-                          clustername);
-    
-    AggregateConf instanceDefinition = new AggregateConf();
-    ConfTreeOperations appConf = instanceDefinition.getAppConfOperations();
-    ConfTreeOperations resources = instanceDefinition.getResourceOperations();
-    ConfTreeOperations internal = instanceDefinition.getInternalOperations();
-    //initial definition is set by the providers 
-    sliderAM.prepareInstanceConfiguration(instanceDefinition);
-    provider.prepareInstanceConfiguration(instanceDefinition);
-
-    //load in any specified on the command line
-    if (buildInfo.resources != null) {
-      try {
-        resources.mergeFile(buildInfo.resources,
-                            new ResourcesInputPropertiesValidator());
-
-      } catch (IOException e) {
-        throw new BadConfigException(e,
-               "incorrect argument to %s: \"%s\" : %s ", 
-                                     Arguments.ARG_RESOURCES,
-                                     buildInfo.resources,
-                                     e.toString());
-      }
-    }
-    if (buildInfo.template != null) {
-      try {
-        appConf.mergeFile(buildInfo.template,
-                          new TemplateInputPropertiesValidator());
-      } catch (IOException e) {
-        throw new BadConfigException(e,
-                                     "incorrect argument to %s: \"%s\" : %s ",
-                                     Arguments.ARG_TEMPLATE,
-                                     buildInfo.template,
-                                     e.toString());
-      }
-    }
-
-    if (isUpgradeFlow) {
-      ActionUpgradeArgs upgradeInfo = (ActionUpgradeArgs) buildInfo;
-      if (!upgradeInfo.force) {
-        validateClientAndClusterResource(clustername, resources);
-      }
-    }
-
-    //get the command line options
-    ConfTree cmdLineAppOptions = buildInfo.buildAppOptionsConfTree();
-    ConfTree cmdLineResourceOptions = buildInfo.buildResourceOptionsConfTree();
-
-    appConf.merge(cmdLineAppOptions);
-
-    AppDefinitionPersister appDefinitionPersister = new AppDefinitionPersister(sliderFileSystem);
-    appDefinitionPersister.processSuppliedDefinitions(clustername, buildInfo, appConf);
-
-    // put the role counts into the resources file
-    Map<String, String> argsRoleMap = buildInfo.getComponentMap();
-    for (Map.Entry<String, String> roleEntry : argsRoleMap.entrySet()) {
-      String count = roleEntry.getValue();
-      String key = roleEntry.getKey();
-      log.info("{} => {}", key, count);
-      resources.getOrAddComponent(key).put(COMPONENT_INSTANCES, count);
-    }
-
-    //all CLI role options
-    Map<String, Map<String, String>> appOptionMap =
-      buildInfo.getCompOptionMap();
-    appConf.mergeComponents(appOptionMap);
-
-    //internal picks up core. values only
-    internal.propagateGlobalKeys(appConf, "slider.");
-    internal.propagateGlobalKeys(appConf, "internal.");
-
-    //copy over role. and yarn. values ONLY to the resources
-    if (PROPAGATE_RESOURCE_OPTION) {
-      resources.propagateGlobalKeys(appConf, "component.");
-      resources.propagateGlobalKeys(appConf, "role.");
-      resources.propagateGlobalKeys(appConf, "yarn.");
-      resources.mergeComponentsPrefix(appOptionMap, "component.", true);
-      resources.mergeComponentsPrefix(appOptionMap, "yarn.", true);
-      resources.mergeComponentsPrefix(appOptionMap, "role.", true);
-    }
-
-    // resource component args
-    appConf.merge(cmdLineResourceOptions);
-    resources.merge(cmdLineResourceOptions);
-    resources.mergeComponents(buildInfo.getResourceCompOptionMap());
-
-    builder.init(providerName, instanceDefinition);
-    builder.resolve();
-    builder.propagateFilename();
-    builder.propagatePrincipals();
-    builder.setImageDetailsIfAvailable(buildInfo.getImage(),
-                                       buildInfo.getAppHomeDir());
-    builder.setQueue(buildInfo.queue);
-
-    String quorum = buildInfo.getZKhosts();
-    if (isUnset(quorum)) {
-      quorum = registryQuorum;
-    }
-    if (isUnset(quorum)) {
-      throw new BadConfigException(E_NO_ZOOKEEPER_QUORUM);
-    }
-    ZKPathBuilder zkPaths = new ZKPathBuilder(getAppName(),
-        getUsername(),
-        clustername,
-        registryQuorum,
-        quorum);
-    String zookeeperRoot = buildInfo.getAppZKPath();
-
-    if (isSet(zookeeperRoot)) {
-      zkPaths.setAppPath(zookeeperRoot);
-    } else {
-      String createDefaultZkNode = appConf.getGlobalOptions()
-          .getOption(AgentKeys.CREATE_DEF_ZK_NODE, "false");
-      if (createDefaultZkNode.equals("true")) {
-        String defaultZKPath = createZookeeperNode(clustername, false);
-        log.debug("ZK node created for application instance: {}", defaultZKPath);
-        if (defaultZKPath != null) {
-          zkPaths.setAppPath(defaultZKPath);
-        }
-      } else {
-        // create AppPath if default is being used
-        String defaultZKPath = createZookeeperNode(clustername, true);
-        log.debug("ZK node assigned to application instance: {}", defaultZKPath);
-        zkPaths.setAppPath(defaultZKPath);
-      }
-    }
-
-    builder.addZKBinding(zkPaths);
-
-    //then propagate any package URI
-    if (buildInfo.packageURI != null) {
-      appConf.set(AgentKeys.PACKAGE_PATH, buildInfo.packageURI);
-    }
-
-    propagatePythonExecutable(conf, instanceDefinition);
-
-    // make any substitutions needed at this stage
-    replaceTokens(appConf.getConfTree(), getUsername(), clustername);
-
-    // TODO: Refactor the validation code and persistence code
-    try {
-      persistInstanceDefinition(overwrite, appconfdir, builder);
-      appDefinitionPersister.persistPackages();
-
-    } catch (LockAcquireFailedException e) {
-      log.warn("Failed to get a Lock on {} : {}", builder, e, e);
-      throw new BadClusterStateException("Failed to save " + clustername
-                                         + ": " + e);
-    }
-
-    // providers to validate what there is
-    // TODO: Validation should be done before persistence
-    AggregateConf instanceDescription = builder.getInstanceDescription();
-    validateInstanceDefinition(sliderAM, instanceDescription, sliderFileSystem);
-    validateInstanceDefinition(provider, instanceDescription, sliderFileSystem);
-  }
-
-  private void validateClientAndClusterResource(String clustername,
-      ConfTreeOperations clientResources) throws BadClusterStateException,
-      SliderException, IOException {
-    log.info("Validating upgrade resource definition with current cluster "
-        + "state (components and instance count)");
-    Map<String, Integer> clientComponentInstances = new HashMap<>();
-    for (String componentName : clientResources.getComponentNames()) {
-      if (!SliderKeys.COMPONENT_AM.equals(componentName)) {
-        clientComponentInstances.put(componentName, clientResources
-            .getComponentOptInt(componentName,
-                COMPONENT_INSTANCES, -1));
-      }
-    }
-
-    AggregateConf clusterConf = null;
-    try {
-      clusterConf = loadPersistedClusterDescription(clustername);
-    } catch (LockAcquireFailedException e) {
-      log.warn("Failed to get a Lock on cluster resource : {}", e, e);
-      throw new BadClusterStateException(
-          "Failed to load client resource definition " + clustername + ": " + e, e);
-    }
-    Map<String, Integer> clusterComponentInstances = new HashMap<>();
-    for (Map.Entry<String, Map<String, String>> component : clusterConf
-        .getResources().components.entrySet()) {
-      if (!SliderKeys.COMPONENT_AM.equals(component.getKey())) {
-        clusterComponentInstances.put(
-            component.getKey(),
-            Integer.decode(component.getValue().get(
-                COMPONENT_INSTANCES)));
-      }
-    }
-
-    // client and cluster should be an exact match
-    Iterator<Map.Entry<String, Integer>> clientComponentInstanceIt = clientComponentInstances
-        .entrySet().iterator();
-    while (clientComponentInstanceIt.hasNext()) {
-      Map.Entry<String, Integer> clientComponentInstanceEntry = clientComponentInstanceIt.next();
-      if (clusterComponentInstances.containsKey(clientComponentInstanceEntry.getKey())) {
-        // compare instance count now and remove from both maps if they match
-        if (clusterComponentInstances
-            .get(clientComponentInstanceEntry.getKey()).intValue() == clientComponentInstanceEntry
-            .getValue().intValue()) {
-          clusterComponentInstances.remove(clientComponentInstanceEntry
-              .getKey());
-          clientComponentInstanceIt.remove();
-        }
-      }
-    }
-
-    if (!clientComponentInstances.isEmpty()
-        || !clusterComponentInstances.isEmpty()) {
-      log.error("Mismatch found in upgrade resource definition and cluster "
-          + "resource state");
-      if (!clientComponentInstances.isEmpty()) {
-        log.info("The upgrade resource definitions that do not match are:");
-        for (Map.Entry<String, Integer> clientComponentInstanceEntry : clientComponentInstances
-            .entrySet()) {
-          log.info("    Component Name: {}, Instance count: {}",
-              clientComponentInstanceEntry.getKey(),
-              clientComponentInstanceEntry.getValue());
-        }
-      }
-      if (!clusterComponentInstances.isEmpty()) {
-        log.info("The cluster resources that do not match are:");
-        for (Map.Entry<String, Integer> clusterComponentInstanceEntry : clusterComponentInstances
-            .entrySet()) {
-          log.info("    Component Name: {}, Instance count: {}",
-              clusterComponentInstanceEntry.getKey(),
-              clusterComponentInstanceEntry.getValue());
-        }
-      }
-      throw new BadConfigException("Resource definition provided for "
-          + "upgrade does not match with that of the currently running "
-          + "cluster.\nIf you are aware of what you are doing, rerun the "
-          + "command with " + Arguments.ARG_FORCE + " option.");
-    }
-  }
-
-  protected void persistInstanceDefinition(boolean overwrite,
-                                         Path appconfdir,
-                                         InstanceBuilder builder)
-      throws IOException, SliderException, LockAcquireFailedException {
-    builder.persist(appconfdir, overwrite);
-  }
-
-  @VisibleForTesting
-  public static void replaceTokens(ConfTree conf,
-      String userName, String clusterName) throws IOException {
-    Map<String,String> newglobal = new HashMap<>();
-    for (Entry<String,String> entry : conf.global.entrySet()) {
-      newglobal.put(entry.getKey(), replaceTokens(entry.getValue(),
-          userName, clusterName));
-    }
-    conf.global.putAll(newglobal);
-
-    for (String component : conf.components.keySet()) {
-      Map<String,String> newComponent = new HashMap<>();
-      for (Entry<String,String> entry : conf.components.get(component).entrySet()) {
-        newComponent.put(entry.getKey(), replaceTokens(entry.getValue(),
-            userName, clusterName));
-      }
-      conf.components.get(component).putAll(newComponent);
-    }
-
-    Map<String,List<String>> newcred = new HashMap<>();
-    for (Entry<String,List<String>> entry : conf.credentials.entrySet()) {
-      List<String> resultList = new ArrayList<>();
-      for (String v : entry.getValue()) {
-        resultList.add(replaceTokens(v, userName, clusterName));
-      }
-      newcred.put(replaceTokens(entry.getKey(), userName, clusterName),
-          resultList);
-    }
-    conf.credentials.clear();
-    conf.credentials.putAll(newcred);
-  }
-
-  private static String replaceTokens(String s, String userName,
-      String clusterName) throws IOException {
-    return s.replaceAll(Pattern.quote("${USER}"), userName)
-        .replaceAll(Pattern.quote("${USER_NAME}"), userName);
-  }
-
-  public FsPermission getClusterDirectoryPermissions(Configuration conf) {
-    String clusterDirPermsOct =
-      conf.get(CLUSTER_DIRECTORY_PERMISSIONS, DEFAULT_CLUSTER_DIRECTORY_PERMISSIONS);
-    return new FsPermission(clusterDirPermsOct);
-  }
-
-  /**
-   * Verify that the Resource Manager is configured (on a non-HA cluster).
-   * with a useful error message
-   * @throws BadCommandArgumentsException the exception raised on an invalid config
-   */
-  public void verifyBindingsDefined() throws BadCommandArgumentsException {
-    InetSocketAddress rmAddr = getRmAddress(getConfig());
-    if (!getConfig().getBoolean(YarnConfiguration.RM_HA_ENABLED, false)
-     && !isAddressDefined(rmAddr)) {
-      throw new BadCommandArgumentsException(
-        E_NO_RESOURCE_MANAGER
-        + " in the argument "
-        + Arguments.ARG_MANAGER
-        + " or the configuration property "
-        + YarnConfiguration.RM_ADDRESS 
-        + " value :" + rmAddr);
-    }
-  }
-
-  /**
-   * Load and start a cluster specification.
-   * This assumes that all validation of args and cluster state
-   * have already taken place
-   *
-   * @param clustername name of the cluster.
-   * @param launchArgs launch arguments
-   * @param lifetime
-   * @return the exit code
-   * @throws YarnException
-   * @throws IOException
-   */
-  protected int startCluster(String clustername, LaunchArgsAccessor launchArgs,
-      long lifetime) throws YarnException, IOException {
-    Path clusterDirectory = sliderFileSystem.buildClusterDirPath(clustername);
-    AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
-      clustername,
-      clusterDirectory);
-
-    LaunchedApplication launchedApplication =
-      launchApplication(clustername, clusterDirectory, instanceDefinition,
-                        serviceArgs.isDebug(), lifetime);
-
-    if (launchArgs.getOutputFile() != null) {
-      // output file has been requested. Get the app report and serialize it
-      ApplicationReport report =
-          launchedApplication.getApplicationReport();
-      SerializedApplicationReport sar = new SerializedApplicationReport(report);
-      sar.submitTime = System.currentTimeMillis();
-      ApplicationReportSerDeser serDeser = new ApplicationReportSerDeser();
-      serDeser.save(sar, launchArgs.getOutputFile());
-    }
-    int waittime = launchArgs.getWaittime();
-    if (waittime > 0) {
-      return waitForAppRunning(launchedApplication, waittime, waittime);
-    } else {
-      // no waiting
-      return EXIT_SUCCESS;
-    }
+    return newTimeout;
   }
 
   /**
@@ -1968,415 +1588,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
-  /**
-   * Load the instance definition. 
-   * @param name cluster name
-   * @param resolved flag to indicate the cluster should be resolved
-   * @return the loaded configuration
-   * @throws IOException IO problems
-   * @throws SliderException slider explicit issues
-   * @throws UnknownApplicationInstanceException if the file is not found
-   */
-    public AggregateConf loadInstanceDefinition(String name,
-        boolean resolved) throws
-        IOException,
-        SliderException {
-
-    Path clusterDirectory = sliderFileSystem.buildClusterDirPath(name);
-    AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
-      name,
-      clusterDirectory);
-    if (resolved) {
-      instanceDefinition.resolve();
-    }
-    return instanceDefinition;
-
-  }
-
-  protected AppMasterLauncher setupAppMasterLauncher(String clustername,
-      Path clusterDirectory, AggregateConf instanceDefinition, boolean debugAM,
-      long lifetime)
-    throws YarnException, IOException{
-    deployedClusterName = clustername;
-    validateClusterName(clustername);
-    verifyNoLiveClusters(clustername, "Launch");
-    Configuration config = getConfig();
-    lookupZKQuorum();
-    boolean clusterSecure = isHadoopClusterSecure(config);
-    //create the Slider AM provider -this helps set up the AM
-    SliderAMClientProvider sliderAM = new SliderAMClientProvider(config);
-
-    instanceDefinition.resolve();
-    launchedInstanceDefinition = instanceDefinition;
-
-    ConfTreeOperations internalOperations = instanceDefinition.getInternalOperations();
-    MapOperations internalOptions = internalOperations.getGlobalOptions();
-    ConfTreeOperations resourceOperations = instanceDefinition.getResourceOperations();
-    ConfTreeOperations appOperations = instanceDefinition.getAppConfOperations();
-    Path generatedConfDirPath =
-      createPathThatMustExist(internalOptions.getMandatoryOption(
-        INTERNAL_GENERATED_CONF_PATH));
-    Path snapshotConfPath =
-      createPathThatMustExist(internalOptions.getMandatoryOption(
-        INTERNAL_SNAPSHOT_CONF_PATH));
-
-
-    // cluster Provider
-    AbstractClientProvider provider = createClientProvider(
-      internalOptions.getMandatoryOption(INTERNAL_PROVIDER_NAME));
-    if (log.isDebugEnabled()) {
-      log.debug(instanceDefinition.toString());
-    }
-    MapOperations sliderAMResourceComponent =
-      resourceOperations.getOrAddComponent(SliderKeys.COMPONENT_AM);
-    MapOperations resourceGlobalOptions = resourceOperations.getGlobalOptions();
-
-    // add the tags if available
-    Set<String> applicationTags = provider.getApplicationTags(sliderFileSystem,
-        appOperations, clustername);
-
-    Credentials credentials = null;
-    if (clusterSecure) {
-      // pick up oozie credentials
-      credentials = CredentialUtils.loadTokensFromEnvironment(System.getenv(),
-          config);
-      if (credentials == null) {
-        // nothing from oozie, so build up directly
-        credentials = new Credentials(
-            UserGroupInformation.getCurrentUser().getCredentials());
-        CredentialUtils.addRMRenewableFSDelegationTokens(config,
-            sliderFileSystem.getFileSystem(),
-            credentials);
-        CredentialUtils.addRMDelegationToken(yarnClient, credentials);
-
-      } else {
-        log.info("Using externally supplied credentials to launch AM");
-      }
-    }
-
-    AppMasterLauncher amLauncher = new AppMasterLauncher(clustername,
-        SliderKeys.APP_TYPE,
-        config,
-        sliderFileSystem,
-        yarnClient,
-        clusterSecure,
-        sliderAMResourceComponent,
-        resourceGlobalOptions,
-        applicationTags,
-        credentials);
-
-    ApplicationId appId = amLauncher.getApplicationId();
-    // set the application name;
-    amLauncher.setKeepContainersOverRestarts(true);
-    // set lifetime in submission context;
-    Map<ApplicationTimeoutType, Long> appTimeout = new HashMap<>();
-    if (lifetime > 0) {
-      appTimeout.put(ApplicationTimeoutType.LIFETIME, lifetime);
-    }
-    amLauncher.submissionContext.setApplicationTimeouts(appTimeout);
-    int maxAppAttempts = config.getInt(KEY_AM_RESTART_LIMIT, 0);
-    amLauncher.setMaxAppAttempts(maxAppAttempts);
-
-    sliderFileSystem.purgeAppInstanceTempFiles(clustername);
-    Path tempPath = sliderFileSystem.createAppInstanceTempPath(
-        clustername,
-        appId.toString() + "/am");
-    String libdir = "lib";
-    Path libPath = new Path(tempPath, libdir);
-    sliderFileSystem.getFileSystem().mkdirs(libPath);
-    log.debug("FS={}, tempPath={}, libdir={}", sliderFileSystem, tempPath, libPath);
- 
-    // set local resources for the application master
-    // local files or archives as needed
-    // In this scenario, the jar file for the application master is part of the local resources
-    Map<String, LocalResource> localResources = amLauncher.getLocalResources();
-    
-    // look for the configuration directory named on the command line
-    boolean hasServerLog4jProperties = false;
-    Path remoteConfPath = null;
-    String relativeConfDir = null;
-    String confdirProp = System.getProperty(SliderKeys.PROPERTY_CONF_DIR);
-    if (isUnset(confdirProp)) {
-      log.debug("No local configuration directory provided as system property");
-    } else {
-      File confDir = new File(confdirProp);
-      if (!confDir.exists()) {
-        throw new BadConfigException(E_CONFIGURATION_DIRECTORY_NOT_FOUND,
-                                     confDir);
-      }
-      Path localConfDirPath = createLocalPath(confDir);
-      remoteConfPath = new Path(clusterDirectory, SliderKeys.SUBMITTED_CONF_DIR);
-      log.debug("Slider configuration directory is {}; remote to be {}", 
-          localConfDirPath, remoteConfPath);
-      copyDirectory(config, localConfDirPath, remoteConfPath, null);
-
-      File log4jserver =
-          new File(confDir, SliderKeys.LOG4J_SERVER_PROP_FILENAME);
-      hasServerLog4jProperties = log4jserver.isFile();
-    }
-    if (!hasServerLog4jProperties) {
-      // check for log4j properties in hadoop conf dir
-      String hadoopConfDir = System.getenv(ApplicationConstants.Environment
-          .HADOOP_CONF_DIR.name());
-      if (hadoopConfDir != null) {
-        File localFile = new File(hadoopConfDir, SliderKeys
-            .LOG4J_SERVER_PROP_FILENAME);
-        if (localFile.exists()) {
-          Path localFilePath = createLocalPath(localFile);
-          remoteConfPath = new Path(clusterDirectory,
-              SliderKeys.SUBMITTED_CONF_DIR);
-          Path remoteFilePath = new Path(remoteConfPath, SliderKeys
-              .LOG4J_SERVER_PROP_FILENAME);
-          copy(config, localFilePath, remoteFilePath);
-          hasServerLog4jProperties = true;
-        }
-      }
-    }
-    // the assumption here is that minimr cluster => this is a test run
-    // and the classpath can look after itself
-
-    boolean usingMiniMRCluster = getUsingMiniMRCluster();
-    if (!usingMiniMRCluster) {
-
-      log.debug("Destination is not a MiniYARNCluster -copying full classpath");
-
-      // insert conf dir first
-      if (remoteConfPath != null) {
-        relativeConfDir = SliderKeys.SUBMITTED_CONF_DIR;
-        Map<String, LocalResource> submittedConfDir =
-          sliderFileSystem.submitDirectory(remoteConfPath,
-                                         relativeConfDir);
-        mergeMaps(localResources, submittedConfDir);
-      }
-    }
-    // build up the configuration 
-    // IMPORTANT: it is only after this call that site configurations
-    // will be valid.
-
-    propagatePrincipals(config, instanceDefinition);
-    // validate security data
-
-/*
-    // turned off until tested
-    SecurityConfiguration securityConfiguration =
-        new SecurityConfiguration(config,
-            instanceDefinition, clustername);
-    
-*/
-    Configuration clientConfExtras = new Configuration(false);
-    // then build up the generated path.
-    FsPermission clusterPerms = getClusterDirectoryPermissions(config);
-    copyDirectory(config, snapshotConfPath, generatedConfDirPath,
-        clusterPerms);
-
-
-    // standard AM resources
-    sliderAM.prepareAMAndConfigForLaunch(sliderFileSystem,
-                                       config,
-                                       amLauncher,
-                                       instanceDefinition,
-                                       snapshotConfPath,
-                                       generatedConfDirPath,
-                                       clientConfExtras,
-                                       libdir,
-                                       tempPath,
-                                       usingMiniMRCluster);
-    //add provider-specific resources
-    provider.prepareAMAndConfigForLaunch(sliderFileSystem,
-                                         config,
-                                         amLauncher,
-                                         instanceDefinition,
-                                         snapshotConfPath,
-                                         generatedConfDirPath,
-                                         clientConfExtras,
-                                         libdir,
-                                         tempPath,
-                                         usingMiniMRCluster);
-
-    // now that the site config is fully generated, the provider gets
-    // to do a quick review of them.
-    log.debug("Preflight validation of cluster configuration");
-
-
-    sliderAM.preflightValidateClusterConfiguration(sliderFileSystem,
-                                                 clustername,
-                                                 config,
-                                                 instanceDefinition,
-                                                 clusterDirectory,
-                                                 generatedConfDirPath,
-                                                 clusterSecure
-                                                );
-
-    provider.preflightValidateClusterConfiguration(sliderFileSystem,
-                                                   clustername,
-                                                   config,
-                                                   instanceDefinition,
-                                                   clusterDirectory,
-                                                   generatedConfDirPath,
-                                                   clusterSecure
-                                                  );
-
-
-    if (!(provider instanceof DockerClientProvider)) {
-      Path imagePath =
-          extractImagePath(sliderFileSystem, internalOptions);
-      if (sliderFileSystem.maybeAddImagePath(localResources, imagePath)) {
-        log.debug("Registered image path {}", imagePath);
-      }
-    }
-
-    // build the environment
-    amLauncher.putEnv(
-      buildEnvMap(sliderAMResourceComponent));
-    ClasspathConstructor classpath = buildClasspath(relativeConfDir,
-        libdir,
-        getConfig(),
-        sliderFileSystem,
-        usingMiniMRCluster);
-    amLauncher.setClasspath(classpath);
-    //add english env
-    amLauncher.setEnv("LANG", "en_US.UTF-8");
-    amLauncher.setEnv("LC_ALL", "en_US.UTF-8");
-    amLauncher.setEnv("LANGUAGE", "en_US.UTF-8");
-    amLauncher.maybeSetEnv(HADOOP_JAAS_DEBUG,
-        System.getenv(HADOOP_JAAS_DEBUG));
-    amLauncher.putEnv(getAmLaunchEnv(config));
-
-    for (Map.Entry<String, String> envs : getSystemEnv().entrySet()) {
-      log.debug("System env {}={}", envs.getKey(), envs.getValue());
-    }
-    if (log.isDebugEnabled()) {
-      log.debug("AM classpath={}", classpath);
-      log.debug("Environment Map:\n{}",
-                stringifyMap(amLauncher.getEnv()));
-      log.debug("Files in lib path\n{}", sliderFileSystem.listFSDir(libPath));
-    }
-
-    // rm address
-
-    InetSocketAddress rmSchedulerAddress;
-    try {
-      rmSchedulerAddress = getRmSchedulerAddress(config);
-    } catch (IllegalArgumentException e) {
-      throw new BadConfigException("%s Address invalid: %s",
-               YarnConfiguration.RM_SCHEDULER_ADDRESS,
-               config.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
-    }
-    String rmAddr = NetUtils.getHostPortString(rmSchedulerAddress);
-
-    JavaCommandLineBuilder commandLine = new JavaCommandLineBuilder();
-    // insert any JVM options);
-    sliderAM.addJVMOptions(instanceDefinition, commandLine);
-    // enable asserts
-    commandLine.enableJavaAssertions();
-    
-    // if the conf dir has a slideram-log4j.properties, switch to that
-    if (hasServerLog4jProperties) {
-      commandLine.sysprop(SYSPROP_LOG4J_CONFIGURATION, LOG4J_SERVER_PROP_FILENAME);
-      commandLine.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
-    }
-    
-    // add the AM sevice entry point
-    commandLine.add(SliderAppMaster.SERVICE_CLASSNAME);
-
-    // create action and the cluster name
-    commandLine.add(ACTION_CREATE, clustername);
-
-    // debug
-    if (debugAM) {
-      commandLine.add(Arguments.ARG_DEBUG);
-    }
-
-    // set the cluster directory path
-    commandLine.add(Arguments.ARG_CLUSTER_URI, clusterDirectory.toUri());
-
-    if (!isUnset(rmAddr)) {
-      commandLine.add(Arguments.ARG_RM_ADDR, rmAddr);
-    }
-
-    if (serviceArgs.getFilesystemBinding() != null) {
-      commandLine.add(Arguments.ARG_FILESYSTEM, serviceArgs.getFilesystemBinding());
-    }
-
-    // pass the registry binding
-    commandLine.addConfOptionToCLI(config, RegistryConstants.KEY_REGISTRY_ZK_ROOT,
-        RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
-    commandLine.addMandatoryConfOption(config, RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
-
-    if (clusterSecure) {
-      // if the cluster is secure, make sure that
-      // the relevant security settings go over
-      commandLine.addConfOption(config, DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
-    }
-
-    // copy over any/all YARN RM client values, in case the server-side XML conf file
-    // has the 0.0.0.0 address
-    commandLine.addConfOptions(config,
-        YarnConfiguration.RM_ADDRESS,
-        YarnConfiguration.RM_CLUSTER_ID,
-        YarnConfiguration.RM_HOSTNAME,
-        YarnConfiguration.RM_PRINCIPAL);
-
-    // write out the path output
-    commandLine.addOutAndErrFiles(STDOUT_AM, STDERR_AM);
-
-    String cmdStr = commandLine.build();
-    log.debug("Completed setting up app master command {}", cmdStr);
-
-    amLauncher.addCommandLine(commandLine);
-
-    // the Slider AM gets to configure the AM requirements, not the custom provider
-    sliderAM.prepareAMResourceRequirements(sliderAMResourceComponent,
-        amLauncher.getResource());
-
-
-    // Set the priority for the application master
-    amLauncher.setPriority(config.getInt(KEY_YARN_QUEUE_PRIORITY,
-                                   DEFAULT_YARN_QUEUE_PRIORITY));
-
-    // Set the queue to which this application is to be submitted in the RM
-    // Queue for App master
-    String amQueue = config.get(KEY_YARN_QUEUE, DEFAULT_YARN_QUEUE);
-    String suppliedQueue = internalOperations.getGlobalOptions().get(INTERNAL_QUEUE);
-    if(!isUnset(suppliedQueue)) {
-      amQueue = suppliedQueue;
-      log.info("Using queue {} for the application instance.", amQueue);
-    }
-
-    if (isSet(amQueue)) {
-      amLauncher.setQueue(amQueue);
-    }
-    return amLauncher;
-  }
-
-  /**
-   *
-   * @param clustername name of the cluster
-   * @param clusterDirectory cluster dir
-   * @param instanceDefinition the instance definition
-   * @param debugAM enable debug AM options
-   * @param lifetime
-   * @return the launched application
-   * @throws YarnException
-   * @throws IOException
-   */
-  public LaunchedApplication launchApplication(String clustername, Path clusterDirectory,
-      AggregateConf instanceDefinition, boolean debugAM, long lifetime)
-    throws YarnException, IOException {
-
-    AppMasterLauncher amLauncher = setupAppMasterLauncher(clustername,
-        clusterDirectory,
-        instanceDefinition,
-        debugAM, lifetime);
-
-    applicationId = amLauncher.getApplicationId();
-    log.info("Submitting application {}", applicationId);
-
-    // submit the application
-    LaunchedApplication launchedApplication = amLauncher.submitApplication();
-    return launchedApplication;
-  }
-
   protected Map<String, String> getAmLaunchEnv(Configuration config) {
     String sliderAmLaunchEnv = config.get(KEY_AM_LAUNCH_ENV);
     log.debug("{} = {}", KEY_AM_LAUNCH_ENV, sliderAmLaunchEnv);
@@ -2431,95 +1642,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return placeholderKeyValueMap;
   }
 
-  private void propagatePythonExecutable(Configuration config,
-                                         AggregateConf instanceDefinition) {
-    String pythonExec = config.get(
-        PYTHON_EXECUTABLE_PATH);
-    if (pythonExec != null) {
-      instanceDefinition.getAppConfOperations().getGlobalOptions().putIfUnset(
-          PYTHON_EXECUTABLE_PATH,
-          pythonExec);
-    }
-  }
-
-
-  /**
-   * Wait for the launched app to be accepted in the time  
-   * and, optionally running.
-   * <p>
-   * If the application
-   *
-   * @param launchedApplication application
-   * @param acceptWaitMillis time in millis to wait for accept
-   * @param runWaitMillis time in millis to wait for the app to be running.
-   * May be null, in which case no wait takes place
-   * @return exit code: success
-   * @throws YarnException
-   * @throws IOException
-   */
-  public int waitForAppRunning(LaunchedApplication launchedApplication,
-      int acceptWaitMillis, int runWaitMillis) throws YarnException, IOException {
-    assert launchedApplication != null;
-    int exitCode;
-    // wait for the submit state to be reached
-    ApplicationReport report = launchedApplication.monitorAppToState(
-      YarnApplicationState.ACCEPTED,
-      new Duration(acceptWaitMillis));
-
-    // may have failed, so check that
-    if (hasAppFinished(report)) {
-      exitCode = buildExitCode(report);
-    } else {
-      // exit unless there is a wait
-
-
-      if (runWaitMillis != 0) {
-        // waiting for state to change
-        Duration duration = new Duration(runWaitMillis * 1000);
-        duration.start();
-        report = launchedApplication.monitorAppToState(
-          YarnApplicationState.RUNNING, duration);
-        if (report != null &&
-            report.getYarnApplicationState() == YarnApplicationState.RUNNING) {
-          exitCode = EXIT_SUCCESS;
-        } else {
-          exitCode = buildExitCode(report);
-        }
-      } else {
-        exitCode = EXIT_SUCCESS;
-      }
-    }
-    return exitCode;
-  }
-
-
-  /**
-   * Propagate any critical principals from the current site config down to the HBase one.
-   * @param config config to read from
-   * @param clusterSpec cluster spec
-   */
-  private void propagatePrincipals(Configuration config,
-                                   AggregateConf clusterSpec) {
-    String dfsPrincipal = config.get(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY);
-    if (dfsPrincipal != null) {
-      String siteDfsPrincipal = SITE_XML_PREFIX + DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
-      clusterSpec.getAppConfOperations().getGlobalOptions().putIfUnset(
-        siteDfsPrincipal,
-        dfsPrincipal);
-    }
-  }
-
-  /**
-   * Create a path that must exist in the cluster fs
-   * @param uri uri to create
-   * @return the path
-   * @throws FileNotFoundException if the path does not exist
-   */
-  public Path createPathThatMustExist(String uri) throws
-      SliderException, IOException {
-    return sliderFileSystem.createPathThatMustExist(uri);
-  }
-
   /**
    * verify that a live cluster isn't there
    * @param clustername cluster name
@@ -2527,7 +1649,7 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
    * @throws SliderException with exit code EXIT_CLUSTER_LIVE
    * if a cluster of that name is either live or starting up.
    */
-  public void verifyNoLiveClusters(String clustername, String action) throws
+  public void verifyNoLiveApp(String clustername, String action) throws
                                                        IOException,
                                                        YarnException {
     List<ApplicationReport> existing = findAllLiveInstances(clustername);
@@ -2554,11 +1676,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return deployedClusterName;
   }
 
-  @VisibleForTesting
-  public void setDeployedClusterName(String deployedClusterName) {
-    this.deployedClusterName = deployedClusterName;
-  }
-
   /**
    * ask if the client is using a mini MR cluster
    * @return true if they are
@@ -2568,109 +1685,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
         false);
   }
 
-  /**
-   * Get the application name used in the zookeeper root paths
-   * @return an application-specific path in ZK
-   */
-  private String getAppName() {
-    return "slider";
-  }
-
-  /**
-   * Wait for the app to start running (or go past that state)
-   * @param duration time to wait
-   * @return the app report; null if the duration turned out
-   * @throws YarnException YARN or app issues
-   * @throws IOException IO problems
-   */
-  @VisibleForTesting
-  public ApplicationReport monitorAppToRunning(Duration duration)
-    throws YarnException, IOException {
-    return monitorAppToState(YarnApplicationState.RUNNING, duration);
-  }
-
-  /**
-   * Build an exit code for an application from its report.
-   * If the report parameter is null, its interpreted as a timeout
-   * @param report report application report
-   * @return the exit code
-   * @throws IOException
-   * @throws YarnException
-   */
-  private int buildExitCode(ApplicationReport report) throws
-                                                      IOException,
-                                                      YarnException {
-    if (null == report) {
-      return EXIT_TIMED_OUT;
-    }
-
-    YarnApplicationState state = report.getYarnApplicationState();
-    FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
-    switch (state) {
-      case FINISHED:
-        if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
-          log.info("Application has completed successfully");
-          return EXIT_SUCCESS;
-        } else {
-          log.info("Application finished unsuccessfully." +
-                   "YarnState = {}, DSFinalStatus = {} Breaking monitoring loop",
-                   state, dsStatus);
-          return EXIT_YARN_SERVICE_FINISHED_WITH_ERROR;
-        }
-
-      case KILLED:
-        log.info("Application did not finish. YarnState={}, DSFinalStatus={}",
-                 state, dsStatus);
-        return EXIT_YARN_SERVICE_KILLED;
-
-      case FAILED:
-        log.info("Application Failed. YarnState={}, DSFinalStatus={}", state,
-                 dsStatus);
-        return EXIT_YARN_SERVICE_FAILED;
-
-      default:
-        //not in any of these states
-        return EXIT_SUCCESS;
-    }
-  }
-
-  /**
-   * Monitor the submitted application for reaching the requested state.
-   * Will also report if the app reaches a later state (failed, killed, etc)
-   * Kill application if duration!= null & time expires. 
-   * Prerequisite: the applicatin was launched.
-   * @param desiredState desired state.
-   * @param duration how long to wait -must be more than 0
-   * @return the application report -null on a timeout
-   * @throws YarnException
-   * @throws IOException
-   */
-  @VisibleForTesting
-  public ApplicationReport monitorAppToState(
-    YarnApplicationState desiredState,
-    Duration duration)
-    throws YarnException, IOException {
-    LaunchedApplication launchedApplication =
-      new LaunchedApplication(applicationId, yarnClient);
-    return launchedApplication.monitorAppToState(desiredState, duration);
-  }
-
-  @Override
-  public ApplicationReport getApplicationReport() throws
-                                                  IOException,
-                                                  YarnException {
-    return getApplicationReport(applicationId);
-  }
-
-  @Override
-  public boolean forceKillApplication(String reason)
-    throws YarnException, IOException {
-    if (applicationId != null) {
-      new LaunchedApplication(applicationId, yarnClient).forceKill(reason);
-      return true;
-    }
-    return false;
-  }
 
   /**
    * List Slider instances belonging to a specific user with a specific app
@@ -2721,23 +1735,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   /**
-   * Retrieve a list of all live instances. If clustername is supplied then it
-   * returns this specific cluster, if and only if it exists and is live.
-   * 
-   * @param clustername
-   *          cluster name (if looking for a specific live cluster)
-   * @return the list of application names which satisfies the list criteria
-   * @throws IOException
-   * @throws YarnException
-   */
-  public Set<ApplicationReport> getApplicationList(String clustername)
-      throws IOException, YarnException {
-    ActionListArgs args = new ActionListArgs();
-    args.live = true;
-    return getApplicationList(clustername, args);
-  }
-
-  /**
    * Retrieve a list of application instances satisfying the query criteria.
    * 
    * @param clustername
@@ -2757,8 +1754,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       // the above call throws an exception so the return is not really required
       return Collections.emptySet();
     }
-    verifyBindingsDefined();
-
     boolean live = args.live;
     String state = args.state;
     boolean listContainers = args.containers;
@@ -2868,29 +1863,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
-  /**
-   * Enumerate slider instances for the current user, and the
-   * most recent app report, where available.
-   * @param listOnlyInState boolean to indicate that the instances should
-   * only include those in a YARN state
-   * <code> minAppState &lt;= currentState &lt;= maxAppState </code>
-   *
-   * @param minAppState minimum application state to include in enumeration.
-   * @param maxAppState maximum application state to include
-   * @return a map of application instance name to description
-   * @throws IOException Any IO problem
-   * @throws YarnException YARN problems
-   */
-  @Override
-  public Map<String, SliderInstanceDescription> enumSliderInstances(
-      boolean listOnlyInState,
-      YarnApplicationState minAppState,
-      YarnApplicationState maxAppState)
-      throws IOException, YarnException {
-    return yarnAppListClient.enumSliderInstances(listOnlyInState,
-        minAppState,
-        maxAppState);
-  }
 
   /**
    * Extract the state of a Yarn application --state argument
@@ -2928,22 +1900,16 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
 
   @Override
   @VisibleForTesting
-  public int actionFlex(String name, ActionFlexArgs args) throws YarnException, IOException {
-    validateClusterName(name);
-    Map<String, String> roleMap = args.getComponentMap();
-    // throw usage exception if no changes proposed
-    if (roleMap.size() == 0) {
-      actionHelp(ACTION_FLEX);
-    }
-    verifyBindingsDefined();
-    log.debug("actionFlex({})", name);
-    Map<String, String> roleInstances = new HashMap<>();
-    for (Map.Entry<String, String> roleEntry : roleMap.entrySet()) {
-      String key = roleEntry.getKey();
-      String val = roleEntry.getValue();
-      roleInstances.put(key, val);
-    }
-    return flex(name, roleInstances);
+  public void actionFlex(String appName, ActionFlexArgs args)
+      throws YarnException, IOException {
+    Component component = new Component();
+    component.setNumberOfContainers(args.getNumberOfContainers());
+    if (StringUtils.isEmpty(args.getComponent())) {
+      component.setName("DEFAULT");
+    } else {
+      component.setName(args.getComponent());
+    }
+    flex(appName, component);
   }
 
   @Override
@@ -2954,7 +1920,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   public int actionExists(String name, ActionExistsArgs args) throws YarnException, IOException {
-    verifyBindingsDefined();
     validateClusterName(name);
     boolean checkLive = args.live;
     log.debug("actionExists({}, {}, {})", name, checkLive, args.state);
@@ -3050,14 +2015,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   /**
-   * Get at the service registry operations
-   * @return registry client -valid after the service is inited.
-   */
-  public YarnAppListClient getYarnAppListClient() {
-    return yarnAppListClient;
-  }
-
-  /**
    * Find an instance of an application belonging to the current user.
    * @param appname application name
    * @return the app report or null if none is found
@@ -3128,20 +2085,20 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       return EXIT_SUCCESS;
     }
 
-    ClusterDescription status = verifyAndGetClusterDescription(clustername);
+    Application application = getApplication(clustername);
     String outfile = statusArgs.getOutput();
     if (outfile == null) {
-      log.info(status.toJsonString());
+      log.info(application.toString());
     } else {
-      status.save(new File(outfile).getAbsoluteFile());
+      jsonSerDeser.save(application, new File(statusArgs.getOutput()));
     }
     return EXIT_SUCCESS;
   }
 
   @Override
-  public String actionStatus(String clustername)
+  public Application actionStatus(String clustername)
       throws YarnException, IOException {
-    return verifyAndGetClusterDescription(clustername).toJsonString();
+    return getApplication(clustername);
   }
 
   private void queryAndPrintLifetime(String appName)
@@ -3170,13 +2127,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     }
   }
 
-  private ClusterDescription verifyAndGetClusterDescription(String clustername)
-      throws YarnException, IOException {
-    verifyBindingsDefined();
-    validateClusterName(clustername);
-    return getClusterDescription(clustername);
-  }
-
   @Override
   public int actionVersion() {
     SliderVersionInfo.loadAndPrintVersionInfo(log);
@@ -3184,269 +2134,106 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   @Override
-  public int actionFreeze(String clustername,
-      ActionFreezeArgs freezeArgs) throws YarnException, IOException {
-    verifyBindingsDefined();
-    validateClusterName(clustername);
-    int waittime = freezeArgs.getWaittime();
-    String text = freezeArgs.message;
-    boolean forcekill = freezeArgs.force;
-    log.debug("actionFreeze({}, reason={}, wait={}, force={})", clustername,
-              text,
-              waittime,
-              forcekill);
-    
-    //is this actually a known cluster?
-    sliderFileSystem.locateInstanceDefinition(clustername);
-    ApplicationReport app = findInstance(clustername);
+  public void actionStop(String appName, ActionFreezeArgs freezeArgs)
+      throws YarnException, IOException {
+    validateClusterName(appName);
+    ApplicationReport app = findInstance(appName);
     if (app == null) {
-      // exit early
-      log.info("Cluster {} not running", clustername);
-      // not an error to stop a stopped cluster
-      return EXIT_SUCCESS;
-    }
-    log.debug("App to stop was found: {}:\n{}", clustername,
-              new OnDemandReportStringifier(app));
-    if (app.getYarnApplicationState().ordinal() >=
-        YarnApplicationState.FINISHED.ordinal()) {
-      log.info("Cluster {} is in a terminated state {}", clustername,
-               app.getYarnApplicationState());
-      return EXIT_SUCCESS;
+      throw new ApplicationNotFoundException(
+          "Application " + appName + " doesn't exist in RM.");
     }
 
-    // IPC request for a managed shutdown is only possible if the app is running.
-    // so we need to force kill if the app is accepted or submitted
-    if (!forcekill
-        && app.getYarnApplicationState().ordinal() < YarnApplicationState.RUNNING.ordinal()) {
-      log.info("Cluster {} is in a pre-running state {}. Force killing it", clustername,
+    if (app.getYarnApplicationState().ordinal() >= YarnApplicationState.FINISHED
+        .ordinal()) {
+      log.info("Application {} is in a terminated state {}", appName,
           app.getYarnApplicationState());
-      forcekill = true;
-    }
-
-    LaunchedApplication application = new LaunchedApplication(yarnClient, ap

<TRUNCATED>

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org