Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2017/07/25 18:03:13 UTC

[31/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
index 18aa1f5..eb87108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionUpgradeArgs.java
@@ -26,7 +26,7 @@ import java.util.List;
 
 @Parameters(commandNames = { SliderActions.ACTION_UPGRADE },
             commandDescription = SliderActions.DESCRIBE_ACTION_UPGRADE)
-public class ActionUpgradeArgs extends AbstractActionArgs {
+public class ActionUpgradeArgs extends AbstractClusterBuildingActionArgs {
 
   @Override
   public String getActionName() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
deleted file mode 100644
index f171708..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/AppAndResouceOptionArgsDelegate.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.common.params;
-
-import com.beust.jcommander.Parameter;
-import org.apache.slider.core.exceptions.BadCommandArgumentsException;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Delegate for application and resource options
- */
-public class AppAndResouceOptionArgsDelegate extends AbstractArgsDelegate {
-
-
-  /**
-   * Options key value
-   */
-  @Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
-             description = ARG_OPTION + "<name> <value>",
-             splitter = DontSplitArguments.class)
-  public List<String> optionTuples = new ArrayList<>(0);
-
-
-  /**
-   * All the app component option triples
-   */
-  @Parameter(names = {ARG_COMP_OPT,  ARG_COMP_OPT_SHORT,  ARG_ROLEOPT}, arity = 3,
-             description = "Component option " + ARG_COMP_OPT +
-                           " <component> <name> <option>",
-             splitter = DontSplitArguments.class)
-  public List<String> compOptTriples = new ArrayList<>(0);
-
-  /**
-   * Resource Options
-   */
-  @Parameter(names = {ARG_RESOURCE_OPT, ARG_RESOURCE_OPT_SHORT}, arity = 2,
-             description = "Resource option "+ ARG_RESOURCE_OPT + "<name> <value>",
-             splitter = DontSplitArguments.class)
-  public List<String> resOptionTuples = new ArrayList<>(0);
-
-
-  /**
-   * All the resource component option triples
-   */
-  @Parameter(names = {ARG_RES_COMP_OPT, ARG_RES_COMP_OPT_SHORT,}, arity = 3,
-             description = "Component resource option " + ARG_RES_COMP_OPT +
-                           " <component> <name> <option>",
-             splitter = DontSplitArguments.class)
-  public List<String> resCompOptTriples = new ArrayList<>(0);
-
-
-  public Map<String, String> getOptionsMap() throws
-                                             BadCommandArgumentsException {
-    return convertTupleListToMap(ARG_OPTION, optionTuples);
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getCompOptionMap() throws
-                                                             BadCommandArgumentsException {
-    return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
-  }
-
-  public Map<String, String> getResourceOptionsMap() throws
-                                             BadCommandArgumentsException {
-    return convertTupleListToMap(ARG_RESOURCE_OPT, resOptionTuples);
-  }
-
-  /**
-   * Get the role heap mapping (may be empty, but never null)
-   * @return role heap mapping
-   * @throws BadCommandArgumentsException parse problem
-   */
-  public Map<String, Map<String, String>> getResourceCompOptionMap() throws
-                                                             BadCommandArgumentsException {
-    return convertTripleListToMaps(ARG_RES_COMP_OPT, resCompOptTriples);
-  }
-
-  public void setOption(String key, String value) {
-    optionTuples.add(key);
-    optionTuples.add(value);
-  }
-
-  public void setResourceOption(String key, String value) {
-    resOptionTuples.add(key);
-    resOptionTuples.add(value);
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
index 45c1fbd..e978957 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/Arguments.java
@@ -107,12 +107,7 @@ public interface Arguments {
   String ARG_LIFETIME = "--lifetime";
   String ARG_REPLACE_PKG = "--replacepkg";
   String ARG_RESOURCE = "--resource";
-  String ARG_RESOURCES = "--resources";
-  String ARG_RES_COMP_OPT = "--rescompopt";
-  String ARG_RES_COMP_OPT_SHORT = "--rco";
   String ARG_RESOURCE_MANAGER = "--rm";
-  String ARG_RESOURCE_OPT = "--resopt";
-  String ARG_RESOURCE_OPT_SHORT = "-ro";
   String ARG_SECURE = "--secure";
   String ARG_SERVICETYPE = "--servicetype";
   String ARG_SERVICES = "--services";
@@ -120,7 +115,6 @@ public interface Arguments {
   String ARG_SOURCE = "--source";
   String ARG_STATE = "--state";
   String ARG_SYSPROP = "-S";
-  String ARG_TEMPLATE = "--template";
   String ARG_TRUSTSTORE = "--truststore";
   String ARG_USER = "--user";
   String ARG_UPLOAD = "--upload";
@@ -136,29 +130,9 @@ public interface Arguments {
  RIGHT PLACE IN THE LIST
  */
 
-
-  /**
-   * Deprecated: use ARG_COMPONENT
-   */
-  @Deprecated
-  String ARG_ROLE = "--role";
-
-  /**
-   * Deprecated: use ARG_COMP_OPT
-   */
-  @Deprecated
-  String ARG_ROLEOPT = "--roleopt";
-
   /**
    * server: URI for the cluster
    */
   String ARG_CLUSTER_URI = "-cluster-uri";
 
-
-  /**
-   * server: Path for the resource manager instance (required)
-   */
-  String ARG_RM_ADDR = "--rm";
-
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
index abd2ce7..dbb5a16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ClientArgs.java
@@ -53,6 +53,7 @@ public class ClientArgs extends CommonArgs {
   // =========================================================
 
   private final ActionAMSuicideArgs actionAMSuicideArgs = new ActionAMSuicideArgs();
+  private final ActionBuildArgs actionBuildArgs = new ActionBuildArgs();
   private final ActionClientArgs actionClientArgs = new ActionClientArgs();
   private final ActionCreateArgs actionCreateArgs = new ActionCreateArgs();
   private final ActionDependencyArgs actionDependencyArgs = new ActionDependencyArgs();
@@ -62,8 +63,6 @@ public class ClientArgs extends CommonArgs {
   private final ActionFlexArgs actionFlexArgs = new ActionFlexArgs();
   private final ActionFreezeArgs actionFreezeArgs = new ActionFreezeArgs();
   private final ActionHelpArgs actionHelpArgs = new ActionHelpArgs();
-  private final ActionInstallPackageArgs actionInstallPackageArgs = new ActionInstallPackageArgs();
-  private final ActionInstallKeytabArgs actionInstallKeytabArgs = new ActionInstallKeytabArgs();
   private final ActionKDiagArgs actionKDiagArgs = new ActionKDiagArgs();
   private final ActionKeytabArgs actionKeytabArgs = new ActionKeytabArgs();
   private final ActionKillContainerArgs actionKillContainerArgs =
@@ -71,7 +70,6 @@ public class ClientArgs extends CommonArgs {
   private final ActionListArgs actionListArgs = new ActionListArgs();
   private final ActionLookupArgs actionLookupArgs = new ActionLookupArgs();
   private final ActionNodesArgs actionNodesArgs = new ActionNodesArgs();
-  private final ActionPackageArgs actionPackageArgs = new ActionPackageArgs();
   private final ActionRegistryArgs actionRegistryArgs = new ActionRegistryArgs();
   private final ActionResolveArgs actionResolveArgs = new ActionResolveArgs();
   private final ActionResourceArgs actionResourceArgs = new ActionResourceArgs();
@@ -95,6 +93,7 @@ public class ClientArgs extends CommonArgs {
 
     addActions(
         actionAMSuicideArgs,
+        actionBuildArgs,
         actionClientArgs,
         actionCreateArgs,
         actionDependencyArgs,
@@ -104,15 +103,12 @@ public class ClientArgs extends CommonArgs {
         actionFlexArgs,
         actionFreezeArgs,
         actionHelpArgs,
-        actionInstallKeytabArgs,
-        actionInstallPackageArgs,
         actionKDiagArgs,
         actionKeytabArgs,
         actionKillContainerArgs,
         actionListArgs,
         actionLookupArgs,
         actionNodesArgs,
-        actionPackageArgs,
         actionRegistryArgs,
         actionResolveArgs,
         actionResourceArgs,
@@ -153,14 +149,12 @@ public class ClientArgs extends CommonArgs {
     return actionAMSuicideArgs;
   }
 
-  public ActionInstallPackageArgs getActionInstallPackageArgs() { return actionInstallPackageArgs; }
+  public ActionBuildArgs getActionBuildArgs() {
+    return actionBuildArgs;
+  }
 
   public ActionClientArgs getActionClientArgs() { return actionClientArgs; }
 
-  public ActionPackageArgs getActionPackageArgs() { return actionPackageArgs; }
-
-  public ActionInstallKeytabArgs getActionInstallKeytabArgs() { return actionInstallKeytabArgs; }
-
   public ActionKDiagArgs getActionKDiagArgs() {
     return actionKDiagArgs;
   }
@@ -250,6 +244,12 @@ public class ClientArgs extends CommonArgs {
       action = ACTION_HELP;
     }
     switch (action) {
+      case ACTION_BUILD:
+        bindCoreAction(actionBuildArgs);
+        //its a builder, so set those actions too
+        buildingActionArgs = actionBuildArgs;
+        break;
+
       case ACTION_CREATE:
         bindCoreAction(actionCreateArgs);
         //its a builder, so set those actions too
@@ -296,14 +296,6 @@ public class ClientArgs extends CommonArgs {
         bindCoreAction(actionHelpArgs);
         break;
 
-      case ACTION_INSTALL_KEYTAB:
-        bindCoreAction(actionInstallKeytabArgs);
-        break;
-
-      case ACTION_INSTALL_PACKAGE:
-        bindCoreAction(actionInstallPackageArgs);
-        break;
-
       case ACTION_KDIAG:
         bindCoreAction(actionKDiagArgs);
         break;
@@ -328,10 +320,6 @@ public class ClientArgs extends CommonArgs {
         bindCoreAction(actionNodesArgs);
         break;
 
-      case ACTION_PACKAGE:
-        bindCoreAction(actionPackageArgs);
-        break;
-
       case ACTION_REGISTRY:
         bindCoreAction(actionRegistryArgs);
         break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
index 5140059..c819b37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
@@ -30,7 +30,7 @@ public class ComponentArgsDelegate extends AbstractArgsDelegate {
   /**
    * This is a listing of the roles to create
    */
-  @Parameter(names = {ARG_COMPONENT,  ARG_COMPONENT_SHORT, ARG_ROLE},
+  @Parameter(names = {ARG_COMPONENT, ARG_COMPONENT_SHORT},
              arity = 2,
              description = "--component <name> <count> e.g. +1 incr by 1, -2 decr by 2, and 3 makes final count 3",
              splitter = DontSplitArguments.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
new file mode 100644
index 0000000..e63bd12
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/OptionArgsDelegate.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.Parameter;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Delegate for application and resource options.
+ */
+public class OptionArgsDelegate extends AbstractArgsDelegate {
+
+  /**
+   * Options key value.
+   */
+  @Parameter(names = {ARG_OPTION, ARG_OPTION_SHORT}, arity = 2,
+             description = ARG_OPTION + "<name> <value>",
+             splitter = DontSplitArguments.class)
+  public List<String> optionTuples = new ArrayList<>(0);
+
+
+  /**
+   * All the app component option triples.
+   */
+  @Parameter(names = {ARG_COMP_OPT, ARG_COMP_OPT_SHORT}, arity = 3,
+             description = "Component option " + ARG_COMP_OPT +
+                           " <component> <name> <option>",
+             splitter = DontSplitArguments.class)
+  public List<String> compOptTriples = new ArrayList<>(0);
+
+  public Map<String, String> getOptionsMap() throws
+                                             BadCommandArgumentsException {
+    return convertTupleListToMap(ARG_OPTION, optionTuples);
+  }
+
+  /**
+   * Get the role heap mapping (may be empty, but never null).
+   * @return role heap mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, Map<String, String>> getCompOptionMap()
+      throws BadCommandArgumentsException {
+    return convertTripleListToMaps(ARG_COMP_OPT, compOptTriples);
+  }
+
+}

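The new OptionArgsDelegate above collects application option pairs and component option triples as flat string lists (JCommander arity 2 and 3) and converts them into maps on demand via helpers inherited from AbstractArgsDelegate. A minimal sketch of that conversion, filling the public fields directly instead of going through JCommander parsing; the option names and values here are made up:

    import java.util.Map;

    import org.apache.slider.common.params.OptionArgsDelegate;
    import org.apache.slider.core.exceptions.BadCommandArgumentsException;

    public class OptionArgsDelegateDemo {
      public static void main(String[] args) throws BadCommandArgumentsException {
        OptionArgsDelegate options = new OptionArgsDelegate();
        // each option argument (arity 2) contributes a <name> <value> pair to the flat list
        options.optionTuples.add("site.global.app_user");
        options.optionTuples.add("yarn");
        // each component option argument (arity 3) contributes a <component> <name> <value> triple
        options.compOptTriples.add("worker");
        options.compOptTriples.add("yarn.memory");
        options.compOptTriples.add("2048");
        // tuples collapse to a name -> value map, triples to component -> (name -> value)
        Map<String, String> appOptions = options.getOptionsMap();
        Map<String, Map<String, String>> compOptions = options.getCompOptionMap();
        System.out.println(appOptions);
        System.out.println(compOptions);
      }
    }
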
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
index 82e5903..df1a5fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/SliderActions.java
@@ -39,14 +39,12 @@ public interface SliderActions {
   String ACTION_STOP = "stop";
   String ACTION_HELP = "help";
   String ACTION_INSTALL_KEYTAB = "install-keytab";
-  String ACTION_INSTALL_PACKAGE = "install-package";
   String ACTION_KDIAG = "kdiag";
   String ACTION_KEYTAB = "keytab";
   String ACTION_KILL_CONTAINER = "kill-container";
   String ACTION_LIST = "list";
   String ACTION_LOOKUP = "lookup";
   String ACTION_NODES = "nodes";
-  String ACTION_PACKAGE = "package";
   String ACTION_PREFLIGHT = "preflight";
   String ACTION_RECONFIGURE = "reconfigure";
   String ACTION_REGISTRY = "registry";
@@ -99,12 +97,7 @@ public interface SliderActions {
                         "Start a stopped application";
   String DESCRIBE_ACTION_VERSION =
                         "Print the Slider version information";
-  String DESCRIBE_ACTION_INSTALL_PACKAGE = "Install application package." +
-  		" Deprecated, use '" + ACTION_PACKAGE + " " + ClientArgs.ARG_INSTALL + "'.";
-  String DESCRIBE_ACTION_PACKAGE = "Install/list/delete application packages and list app instances that use the packages";
   String DESCRIBE_ACTION_CLIENT = "Install the application client in the specified directory or obtain a client keystore or truststore";
-  String DESCRIBE_ACTION_INSTALL_KEYTAB = "Install the Kerberos keytab." +
-  		" Deprecated, use '" + ACTION_KEYTAB + " " + ClientArgs.ARG_INSTALL + "'.";
   String DESCRIBE_ACTION_KEYTAB = "Manage a Kerberos keytab file (install, delete, list) in the sub-folder 'keytabs' of the user's Slider base directory";
   String DESCRIBE_ACTION_DIAGNOSTIC = "Diagnose the configuration of the running slider application and slider client";
   String DESCRIBE_ACTION_RESOURCE = "Manage a file (install, delete, list) in the 'resources' sub-folder of the user's Slider base directory";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
index 02eba49..0e94a29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/ConfigHelper.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
 import org.apache.slider.core.exceptions.BadConfigException;
@@ -613,13 +612,6 @@ public class ConfigHelper {
    * Register anything we consider deprecated
    */
   public static void registerDeprecatedConfigItems() {
-    Configuration.addDeprecation(
-        SliderXmlConfKeys.REGISTRY_ZK_QUORUM,
-        RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
-    Configuration.addDeprecation(
-        SliderXmlConfKeys.REGISTRY_PATH,
-        RegistryConstants.KEY_REGISTRY_ZK_ROOT);
-    
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
index 9d7c40a..80b70b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -21,14 +21,10 @@ package org.apache.slider.common.tools;
 import com.google.common.base.Preconditions;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
-import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
-import org.apache.commons.io.output.ByteArrayOutputStream;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -52,6 +48,8 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.slider.Slider;
 import org.apache.slider.api.RoleKeys;
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.SliderXmlConfKeys;
@@ -70,7 +68,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -502,6 +499,26 @@ public final class SliderUtils {
   }
 
   /**
+   * Extract the first line of a multi-line string. This is typically used to
+   * prune the stack trace appended to the end of exception messages returned by
+   * YARN in AMRMClientAsync callbacks.
+   *
+   * @param msg
+   *          message string (most likely multi-lines)
+   * @return the first line of a multi-line string or the original string if it
+   *         is a null, empty or single-line
+   */
+  public static String extractFirstLine(String msg) {
+    if (StringUtils.isNotBlank(msg)) {
+      int newlineIndex = msg.indexOf(System.lineSeparator());
+      if (newlineIndex != -1) {
+        msg = msg.substring(0, newlineIndex);
+      }
+    }
+    return msg;
+  }
+
+  /**
    * Create a configuration with Slider-specific tuning.
    * This is done rather than doing custom configs.
    * @return the config
@@ -2046,48 +2063,6 @@ public final class SliderUtils {
     }
   }
 
-  public static InputStream getApplicationResourceInputStream(FileSystem fs,
-      Path appPath,
-      String entry)
-      throws IOException {
-    InputStream is = null;
-    try(FSDataInputStream appStream = fs.open(appPath)) {
-      ZipArchiveInputStream zis = new ZipArchiveInputStream(appStream);
-      ZipArchiveEntry zipEntry;
-      boolean done = false;
-      while (!done && (zipEntry = zis.getNextZipEntry()) != null) {
-        if (entry.equals(zipEntry.getName())) {
-          int size = (int) zipEntry.getSize();
-          if (size != -1) {
-            log.info("Reading {} of size {}", zipEntry.getName(),
-                zipEntry.getSize());
-            byte[] content = new byte[size];
-            int offset = 0;
-            while (offset < size) {
-              offset += zis.read(content, offset, size - offset);
-            }
-            is = new ByteArrayInputStream(content);
-          } else {
-            log.debug("Size unknown. Reading {}", zipEntry.getName());
-            try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
-              while (true) {
-                int byteRead = zis.read();
-                if (byteRead == -1) {
-                  break;
-                }
-                baos.write(byteRead);
-              }
-              is = new ByteArrayInputStream(baos.toByteArray());
-            }
-          }
-          done = true;
-        }
-      }
-    }
-
-    return is;
-  }
-
   /**
    * Check for any needed libraries being present. On Unix none are needed;
    * on windows they must be present
@@ -2525,4 +2500,53 @@ public final class SliderUtils {
     return EnumSet.range(YarnApplicationState.FINISHED,
         YarnApplicationState.KILLED);
   }
+
+  public static final String DAYS = ".days";
+  public static final String HOURS = ".hours";
+  public static final String MINUTES = ".minutes";
+  public static final String SECONDS = ".seconds";
+
+  /**
+   * Get the time range of a set of keys.
+   * @param conf configuration to read properties from
+   * @param basekey base key to which suffix gets applied
+   * @param defDays
+   * @param defHours
+   * @param defMins
+   * @param defSecs
+   * @return the aggregate time range in seconds
+   */
+  public static long getTimeRange(org.apache.slider.api.resource
+      .Configuration conf,
+      String basekey,
+      long defDays,
+      long defHours,
+      long defMins,
+      long defSecs) {
+    Preconditions.checkArgument(basekey != null);
+    long days = conf.getPropertyLong(basekey + DAYS, defDays);
+    long hours = conf.getPropertyLong(basekey + HOURS, defHours);
+
+    long minutes = conf.getPropertyLong(basekey + MINUTES, defMins);
+    long seconds = conf.getPropertyLong(basekey + SECONDS, defSecs);
+    // range check
+    Preconditions.checkState(days >= 0 && hours >= 0 && minutes >= 0
+            && seconds >= 0,
+        "Time range for %s has negative time component %s:%s:%s:%s",
+        basekey, days, hours, minutes, seconds);
+
+    // calculate total time, schedule the reset if expected
+    long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
+    return totalMinutes * 60 + seconds;
+  }
+
+  public static void resolve(Application application) {
+    org.apache.slider.api.resource.Configuration global = application
+        .getConfiguration();
+    for (Component component : application.getComponents()) {
+      mergeMapsIgnoreDuplicateKeys(component.getConfiguration().getProperties(),
+          global.getProperties());
+    }
+    // TODO merge other information to components
+  }
 }
\ No newline at end of file

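The SliderUtils changes above add two small helpers: extractFirstLine(), used by the AM to trim the stack trace that YARN appends to AMRMClientAsync error messages, and getTimeRange(), which sums the .days/.hours/.minutes/.seconds suffixes of a base key into a single duration in seconds. A minimal sketch of the first helper, with a made-up diagnostics string (assumes hadoop-yarn-slider-core is on the classpath):

    import org.apache.slider.common.tools.SliderUtils;

    public class ExtractFirstLineDemo {
      public static void main(String[] args) {
        // hypothetical diagnostics text: a summary line with a stack trace appended
        String diagnostics = "Container request rejected by the scheduler"
            + System.lineSeparator()
            + "  at SomeYarnClass.someMethod(SomeYarnClass.java:42)";
        // only the first line survives; null, empty and single-line input comes back unchanged
        System.out.println(SliderUtils.extractFirstLine(diagnostics));
        // prints: Container request rejected by the scheduler
      }
    }
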
http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
index 4182459..965ea35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
@@ -43,7 +43,6 @@ import org.slf4j.LoggerFactory;
 
 import java.lang.reflect.Method;
 import java.lang.reflect.InvocationTargetException;
-import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
index ef96c9b..4302530 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
@@ -337,5 +337,12 @@ public class ZKIntegration implements Watcher, Closeable {
     return SVC_SLIDER_USERS + "/" + username;
   }
 
-
+  /**
+   * Blocking enum of users.
+   * @return an unordered list of clusters under a user
+   */
+  public List<String> getClusters() throws KeeperException,
+      InterruptedException {
+    return zookeeper.getChildren(userPath, null);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
index e0299e7..cb39368 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -18,6 +18,7 @@
 
 package org.apache.slider.providers;
 
+import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.resource.Component;
 
 /**
@@ -36,6 +37,14 @@ public final class ProviderRole {
   public final String labelExpression;
   public final Component component;
 
+  public ProviderRole(String name, int id) {
+    this(name,
+        id,
+        PlacementPolicy.DEFAULT,
+        ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
+        ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
+        ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+  }
 
   /**
    * Create a provider role
@@ -59,7 +68,8 @@ public final class ProviderRole {
         policy,
         nodeFailureThreshold,
         placementTimeoutSeconds,
-        labelExpression, null);
+        labelExpression,
+        new Component().name(name).numberOfContainers(0L));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 4aeffa6..7473dab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -97,7 +97,6 @@ import org.apache.slider.common.tools.PortScanner;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.SliderInternalStateException;
@@ -855,7 +854,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     providerService.setAMState(stateForProviders);
 
     // chaos monkey
-//    maybeStartMonkey();
+    maybeStartMonkey();
 
     // if not a secure cluster, extract the username -it will be
     // propagated to workers
@@ -1597,7 +1596,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * @throws SliderException slider problems, including invalid configs
    * @throws IOException IO problems
    */
-  public void flexCluster(Messages.FlexComponentRequestProto request)
+  public void flexCluster(Messages.FlexComponentsRequestProto request)
       throws IOException, SliderException {
     if (request != null) {
       appState.updateComponents(request);
@@ -1619,24 +1618,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 
     ResetFailureWindow reset = new ResetFailureWindow(rmOperationHandler);
 
-    long days =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".days",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS);
-    long hours =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".hours",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS);
-    long minutes =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".minutes",
-            ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES);
-    long seconds =
-        conf.getPropertyLong(ResourceKeys.CONTAINER_FAILURE_WINDOW + ".seconds",
-            0);
-    Preconditions
-        .checkState(days >= 0 && hours >= 0 && minutes >= 0 && seconds >= 0,
-            "Time range for has negative time component %s:%s:%s:%s", days,
-            hours, minutes, seconds);
-    long totalMinutes = days * 24 * 60 + hours * 24 + minutes;
-    long totalSeconds = totalMinutes * 60 + seconds;
+    long totalSeconds = SliderUtils.getTimeRange(conf,
+        ResourceKeys.CONTAINER_FAILURE_WINDOW,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_DAYS,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_HOURS,
+        ResourceKeys.DEFAULT_CONTAINER_FAILURE_WINDOW_MINUTES,
+        0);
     if (totalSeconds > 0) {
       log.info("Scheduling the failure window reset interval to every {}"
               + " seconds", totalSeconds);
@@ -1810,12 +1797,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
       signalAMComplete(new ActionStopSlider("stop", EXIT_EXCEPTION_THROWN,
           FinalApplicationStatus.FAILED,
-          "AMRMClientAsync.onError() received " + e));
+          SliderUtils.extractFirstLine(e.getLocalizedMessage())));
     } else if (e instanceof InvalidApplicationMasterRequestException) {
       // halt the AM
       LOG_YARN.error("AMRMClientAsync.onError() received {}", e, e);
       queue(new ActionHalt(EXIT_EXCEPTION_THROWN,
-          "AMRMClientAsync.onError() received " + e));
+          SliderUtils.extractFirstLine(e.getLocalizedMessage())));
     } else {
       // ignore and log
       LOG_YARN.info("Ignoring AMRMClientAsync.onError() received {}", e);
@@ -2040,7 +2027,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    */
   public void onExceptionInThread(Thread thread, Throwable exception) {
     log.error("Exception in {}: {}", thread.getName(), exception, exception);
-    
+
     // if there is a teardown in progress, ignore it
     if (amCompletionFlag.get()) {
       log.info("Ignoring exception: shutdown in progress");
@@ -2052,26 +2039,27 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       signalAMComplete(new ActionStopSlider("stop",
           exitCode,
           FinalApplicationStatus.FAILED,
-          exception.toString()));
+          SliderUtils.extractFirstLine(exception.getLocalizedMessage())));
     }
   }
 
   /**
-   * TODO Start the chaos monkey
+   * TODO Read chaos monkey params from AM configuration rather than app
+   * configuration
    * @return true if it started
    */
   private boolean maybeStartMonkey() {
-//    MapOperations internals = getGlobalInternalOptions();
-    MapOperations internals = new MapOperations();
-    Boolean enabled =
-        internals.getOptionBool(InternalKeys.CHAOS_MONKEY_ENABLED,
-            InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
+    org.apache.slider.api.resource.Configuration configuration =
+        application.getConfiguration();
+    boolean enabled = configuration.getPropertyBool(
+        InternalKeys.CHAOS_MONKEY_ENABLED,
+        InternalKeys.DEFAULT_CHAOS_MONKEY_ENABLED);
     if (!enabled) {
       log.debug("Chaos monkey disabled");
       return false;
     }
     
-    long monkeyInterval = internals.getTimeRange(
+    long monkeyInterval = SliderUtils.getTimeRange(configuration,
         InternalKeys.CHAOS_MONKEY_INTERVAL,
         InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
         InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
@@ -2083,7 +2071,7 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       return false;
     }
 
-    long monkeyDelay = internals.getTimeRange(
+    long monkeyDelay = SliderUtils.getTimeRange(configuration,
         InternalKeys.CHAOS_MONKEY_DELAY,
         0,
         0,
@@ -2098,10 +2086,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     // configure the targets
     
     // launch failure: special case with explicit failure triggered now
-    int amLaunchFailProbability = internals.getOptionInt(
+    int amLaunchFailProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_LAUNCH_FAILURE,
         0);
-    if (amLaunchFailProbability> 0 && monkey.chaosCheck(amLaunchFailProbability)) {
+    if (amLaunchFailProbability > 0 && monkey.chaosCheck(
+        amLaunchFailProbability)) {
       log.info("Chaos Monkey has triggered AM Launch failure");
       // trigger a failure
       ActionStopSlider stop = new ActionStopSlider("stop",
@@ -2112,12 +2101,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       queue(stop);
     }
     
-    int amKillProbability = internals.getOptionInt(
+    int amKillProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_AM_FAILURE,
         InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_AM_FAILURE);
     monkey.addTarget("AM killer",
         new ChaosKillAM(actionQueues, -1), amKillProbability);
-    int containerKillProbability = internals.getOptionInt(
+    int containerKillProbability = configuration.getPropertyInt(
         InternalKeys.CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE,
         InternalKeys.DEFAULT_CHAOS_MONKEY_PROBABILITY_CONTAINER_FAILURE);
     monkey.addTarget("Container killer",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
index 220f2ca..a7b94ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
@@ -26,9 +26,9 @@ import java.util.concurrent.TimeUnit;
 
 public class ActionFlexCluster extends AsyncAction {
 
-  final Messages.FlexComponentRequestProto requestProto;
+  final Messages.FlexComponentsRequestProto requestProto;
   public ActionFlexCluster(String name, long delay, TimeUnit timeUnit,
-      Messages.FlexComponentRequestProto requestProto) {
+      Messages.FlexComponentsRequestProto requestProto) {
     super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
     this.requestProto = requestProto;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
index 510ff73..5dcbe9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/metrics/SliderMetrics.java
@@ -37,13 +37,13 @@ import static org.apache.hadoop.metrics2.lib.Interns.info;
 @Metrics(context = "yarn-native-service")
 public class SliderMetrics implements MetricsSource {
 
-  @Metric("containers pending")
-  public MutableGaugeInt containersPending;
+  @Metric("containers requested")
+  public MutableGaugeInt containersRequested;
   @Metric("anti-affinity containers pending")
   public MutableGaugeInt pendingAAContainers;
-  @Metric("containers pending")
+  @Metric("containers running")
   public MutableGaugeInt containersRunning;
-  @Metric("containers requested")
+  @Metric("containers desired")
   public MutableGaugeInt containersDesired;
   @Metric("containers completed")
   public MutableGaugeInt containersCompleted;
@@ -53,8 +53,12 @@ public class SliderMetrics implements MetricsSource {
   public MutableGaugeInt failedSinceLastThreshold;
   @Metric("containers preempted")
   public MutableGaugeInt containersPreempted;
+  @Metric("containers exceeded limits")
+  public MutableGaugeInt containersLimitsExceeded;
   @Metric("containers surplus")
   public MutableGaugeInt surplusContainers;
+  @Metric("containers failed due to disk failure")
+  public MutableGaugeInt containersDiskFailure;
 
   protected final MetricsRegistry registry;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
index 7830a1e..526ab7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
@@ -70,11 +70,11 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      RpcController controller, Messages.FlexComponentRequestProto request)
+  public Messages.FlexComponentsResponseProto flexComponents(
+      RpcController controller, Messages.FlexComponentsRequestProto request)
       throws ServiceException {
     try {
-      return real.flexComponent(request);
+      return real.flexComponents(request);
     } catch (IOException e) {
       throw wrap(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
index 1902ec1..2e40a9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
@@ -110,10 +110,10 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      Messages.FlexComponentRequestProto request) throws IOException {
+  public Messages.FlexComponentsResponseProto flexComponents(
+      Messages.FlexComponentsRequestProto request) throws IOException {
     try {
-      return endpoint.flexComponent(NULL_CONTROLLER, request);
+      return endpoint.flexComponents(NULL_CONTROLLER, request);
     } catch (ServiceException e) {
       throw convert(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
index eaa0a81..f88d586 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
@@ -196,11 +196,11 @@ public class SliderIPCService extends AbstractService
   }
 
   @Override
-  public Messages.FlexComponentResponseProto flexComponent(
-      Messages.FlexComponentRequestProto request) throws IOException {
+  public Messages.FlexComponentsResponseProto flexComponents(
+      Messages.FlexComponentsRequestProto request) throws IOException {
     onRpcCall("flex");
     schedule(new ActionFlexCluster("flex", 1, TimeUnit.MILLISECONDS, request));
-    return Messages.FlexComponentResponseProto.newBuilder().build();
+    return Messages.FlexComponentsResponseProto.newBuilder().build();
   }
 
   @Override //SliderClusterProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
index 37c730f..cc19eee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
@@ -16,9 +16,23 @@
  */
 package org.apache.slider.server.appmaster.security;
 
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED;
+
+import org.apache.slider.api.resource.Application;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.SliderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
+import java.io.IOException;
+
 /**
  * Class keeping code security information
  */
@@ -26,111 +40,122 @@ public class SecurityConfiguration {
 
   protected static final Logger log =
       LoggerFactory.getLogger(SecurityConfiguration.class);
+  private final Configuration configuration;
+  private final Application application;
   private String clusterName;
 
-//  private void validate() throws SliderException {
-//    if (isSecurityEnabled()) {
-//      String principal = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//      if(SliderUtils.isUnset(principal)) {
-//        // if no login identity is available, fail
-//        UserGroupInformation loginUser = null;
-//        try {
-//          loginUser = getLoginUser();
-//        } catch (IOException e) {
-//          throw new SliderException(EXIT_UNAUTHORIZED, e,
-//                                    "No principal configured for the application and "
-//                                    + "exception raised during retrieval of login user. "
-//                                    + "Unable to proceed with application "
-//                                    + "initialization.  Please ensure a value "
-//                                    + "for %s exists in the application "
-//                                    + "configuration or the login issue is addressed",
-//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//        }
-//        if (loginUser == null) {
-//          throw new SliderException(EXIT_UNAUTHORIZED,
-//                                    "No principal configured for the application "
-//                                    + "and no login user found. "
-//                                    + "Unable to proceed with application "
-//                                    + "initialization.  Please ensure a value "
-//                                    + "for %s exists in the application "
-//                                    + "configuration or the login issue is addressed",
-//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//        }
-//      }
-//      // ensure that either local or distributed keytab mechanism is enabled,
-//      // but not both
-//      String keytabFullPath = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM)
-//          .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-//      String keytabName = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM)
-//          .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
-//        throw new SliderException(EXIT_UNAUTHORIZED,
-//                                  "Both a keytab on the cluster host (%s) and a"
-//                                  + " keytab to be retrieved from HDFS (%s) are"
-//                                  + " specified.  Please configure only one keytab"
-//                                  + " retrieval mechanism.",
-//                                  SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
-//                                  SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//
-//      }
-//    }
-//  }
-//
-//  protected UserGroupInformation getLoginUser() throws IOException {
-//    return UserGroupInformation.getLoginUser();
-//  }
-//
-//  public boolean isSecurityEnabled () {
-//    return SliderUtils.isHadoopClusterSecure(configuration);
-//  }
-//
-//  public String getPrincipal () throws IOException {
-//    String principal = instanceDefinition.getAppConfOperations()
-//        .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-//    if (SliderUtils.isUnset(principal)) {
-//      principal = UserGroupInformation.getLoginUser().getShortUserName();
-//      log.info("No principal set in the slider configuration.  Will use AM login"
-//               + " identity {} to attempt keytab-based login", principal);
-//    }
-//
-//    return principal;
-//  }
-//
-//  public boolean isKeytabProvided() {
-//    boolean keytabProvided = instanceDefinition.getAppConfOperations()
-//                    .getComponent(SliderKeys.COMPONENT_AM)
-//                    .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
-//                instanceDefinition.getAppConfOperations()
-//                    .getComponent(SliderKeys.COMPONENT_AM).
-//                    get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
-//    return keytabProvided;
-//
-//  }
-//
-//  public File getKeytabFile(AggregateConf instanceDefinition)
-//      throws SliderException, IOException {
-//    //TODO implement this for dash semantic
-//    String keytabFullPath = instanceDefinition.getAppConfOperations()
-//        .getComponent(SliderKeys.COMPONENT_AM)
-//        .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-//    File localKeytabFile;
-//    if (SliderUtils.isUnset(keytabFullPath)) {
-//      // get the keytab
-//      String keytabName = instanceDefinition.getAppConfOperations()
-//          .getComponent(SliderKeys.COMPONENT_AM).
-//              get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-//      log.info("No host keytab file path specified. Will attempt to retrieve"
-//               + " keytab file {} as a local resource for the container",
-//               keytabName);
-//      // download keytab to local, protected directory
-//      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
-//    } else {
-//      log.info("Using host keytab file {} for login", keytabFullPath);
-//      localKeytabFile = new File(keytabFullPath);
-//    }
-//    return localKeytabFile;
-//  }
+  public SecurityConfiguration(Configuration configuration,
+                               Application application,
+                               String clusterName) throws SliderException {
+    Preconditions.checkNotNull(configuration);
+    Preconditions.checkNotNull(application);
+    Preconditions.checkNotNull(clusterName);
+    this.configuration = configuration;
+    this.application = application;
+    this.clusterName = clusterName;
+    validate();
+  }
+
+  private void validate() throws SliderException {
+    if (isSecurityEnabled()) {
+      // TODO use AM configuration rather than app configuration
+      String principal = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+      if(SliderUtils.isUnset(principal)) {
+        // if no login identity is available, fail
+        UserGroupInformation loginUser = null;
+        try {
+          loginUser = getLoginUser();
+        } catch (IOException e) {
+          throw new SliderException(EXIT_UNAUTHORIZED, e,
+              "No principal configured for the application and "
+                  + "exception raised during retrieval of login user. "
+                  + "Unable to proceed with application "
+                  + "initialization.  Please ensure a value "
+                  + "for %s exists in the application "
+                  + "configuration or the login issue is addressed",
+              SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+        }
+        if (loginUser == null) {
+          throw new SliderException(EXIT_UNAUTHORIZED,
+              "No principal configured for the application "
+                  + "and no login user found. "
+                  + "Unable to proceed with application "
+                  + "initialization.  Please ensure a value "
+                  + "for %s exists in the application "
+                  + "configuration or the login issue is addressed",
+              SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+        }
+      }
+      // ensure that either local or distributed keytab mechanism is enabled,
+      // but not both
+      String keytabFullPath = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+      String keytabName = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
+        throw new SliderException(EXIT_UNAUTHORIZED,
+            "Both a keytab on the cluster host (%s) and a"
+                + " keytab to be retrieved from HDFS (%s) are"
+                + " specified.  Please configure only one keytab"
+                + " retrieval mechanism.",
+            SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+            SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+
+      }
+    }
+  }
+
+  protected UserGroupInformation getLoginUser() throws IOException {
+    return UserGroupInformation.getLoginUser();
+  }
+
+  public boolean isSecurityEnabled() {
+    return SliderUtils.isHadoopClusterSecure(configuration);
+  }
+
+  public String getPrincipal() throws IOException {
+    String principal = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+    if (SliderUtils.isUnset(principal)) {
+      principal = UserGroupInformation.getLoginUser().getShortUserName();
+      log.info("No principal set in the slider configuration.  Will use AM " +
+          "login identity {} to attempt keytab-based login", principal);
+    }
+
+    return principal;
+  }
+
+  public boolean isKeytabProvided() {
+    String keytabLocalPath = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+    String keytabName = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+    return StringUtils.isNotBlank(keytabLocalPath)
+        || StringUtils.isNotBlank(keytabName);
+
+  }
+
+  public File getKeytabFile()
+      throws SliderException, IOException {
+    //TODO implement this for dash semantic
+    String keytabFullPath = application.getConfiguration().getProperty(
+        SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+    File localKeytabFile;
+    if (SliderUtils.isUnset(keytabFullPath)) {
+      // get the keytab
+      String keytabName = application.getConfiguration().getProperty(
+          SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+      log.info("No host keytab file path specified. Will attempt to retrieve"
+               + " keytab file {} as a local resource for the container",
+               keytabName);
+      // download keytab to local, protected directory
+      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
+    } else {
+      log.info("Using host keytab file {} for login", keytabFullPath);
+      localKeytabFile = new File(keytabFullPath);
+    }
+    return localKeytabFile;
+  }
+
 }
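The refactored SecurityConfiguration above is now driven by the Application resource model rather than AggregateConf, and validation runs in the constructor. A minimal usage sketch under those assumptions (the `conf`, `app`, and cluster-name values are illustrative, not taken from the patch; exception handling elided):

    // Sketch only: assumes a Hadoop Configuration `conf` and a populated
    // org.apache.slider.api.resource.Application `app` are already available.
    SecurityConfiguration security =
        new SecurityConfiguration(conf, app, "my-cluster"); // validate() runs here
    if (security.isSecurityEnabled() && security.isKeytabProvided()) {
      // Either the host-local keytab path or a name resolved under
      // SliderKeys.KEYTAB_DIR, depending on which mechanism is configured.
      File keytab = security.getKeytabFile();
      // One plausible consumer; the AM's actual login path may differ.
      UserGroupInformation.loginUserFromKeytab(security.getPrincipal(),
          keytab.getAbsolutePath());
    }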

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 3d73f3b..43c7ead 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -38,6 +38,7 @@ import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.StatusKeys;
 import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.proto.Messages.ComponentCountProto;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.ApplicationState;
 import org.apache.slider.api.resource.Component;
@@ -219,7 +220,13 @@ public class AppState {
     return roleStatusMap;
   }
   
+  protected Map<String, ProviderRole> getRoleMap() {
+    return roles;
+  }
 
+  public Map<Integer, ProviderRole> getRolePriorityMap() {
+    return rolePriorityMap;
+  }
 
   private Map<ContainerId, RoleInstance> getStartingContainers() {
     return startingContainers;
@@ -257,6 +264,11 @@ public class AppState {
     return roleHistory;
   }
 
+  @VisibleForTesting
+  public void setRoleHistory(RoleHistory roleHistory) {
+    this.roleHistory = roleHistory;
+  }
+
   /**
    * Get the path used for history files
    * @return the directory used for history files
@@ -306,6 +318,15 @@ public class AppState {
     appMetrics
         .tag("appId", "Application id for service", app.getId());
 
+    org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
+    startTimeThreshold =
+        conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
+            InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
+    failureThreshold = conf.getPropertyInt(CONTAINER_FAILURE_THRESHOLD,
+        DEFAULT_CONTAINER_FAILURE_THRESHOLD);
+    nodeFailureThreshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
+        DEFAULT_NODE_FAILURE_THRESHOLD);
+
     //build the initial role list
     List<ProviderRole> roleList = new ArrayList<>(binding.roles);
     for (ProviderRole providerRole : roleList) {
@@ -314,6 +335,7 @@ public class AppState {
 
     int priority = 1;
     for (Component component : app.getComponents()) {
+      priority = getNewPriority(priority);
       String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
@@ -324,22 +346,13 @@ public class AppState {
       }
       log.info("Adding component: " + name);
       ProviderRole dynamicRole =
-          createComponent(name, name, component, priority++);
+          createComponent(name, name, component, priority);
       buildRole(dynamicRole);
       roleList.add(dynamicRole);
     }
     //then pick up the requirements
     buildRoleRequirementsFromResources();
 
-    org.apache.slider.api.resource.Configuration conf = app.getConfiguration();
-    startTimeThreshold =
-        conf.getPropertyLong(InternalKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
-            InternalKeys.DEFAULT_INTERNAL_CONTAINER_FAILURE_SHORTLIFE);
-    failureThreshold = (int) conf.getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
-        DEFAULT_CONTAINER_FAILURE_THRESHOLD);
-    nodeFailureThreshold = (int) conf.getPropertyLong(NODE_FAILURE_THRESHOLD,
-        DEFAULT_NODE_FAILURE_THRESHOLD);
-
     // set up the role history
     roleHistory = new RoleHistory(roleStatusMap.values(), recordFactory);
     roleHistory.onStart(binding.fs, binding.historyPath);
@@ -359,34 +372,47 @@ public class AppState {
   //TODO WHY do we need to create the component for AM ?
   public ProviderRole createComponent(String name, String group,
       Component component, int priority) throws BadConfigException {
-
     org.apache.slider.api.resource.Configuration conf =
         component.getConfiguration();
     long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY,
         DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS);
     long placementPolicy = conf.getPropertyLong(COMPONENT_PLACEMENT_POLICY,
         PlacementPolicy.DEFAULT);
-    int threshold = (int) conf
-        .getPropertyLong(NODE_FAILURE_THRESHOLD, nodeFailureThreshold);
+    int threshold = conf.getPropertyInt(NODE_FAILURE_THRESHOLD,
+        nodeFailureThreshold);
+    String label = conf.getProperty(YARN_LABEL_EXPRESSION,
+        DEF_YARN_LABEL_EXPRESSION);
     ProviderRole newRole =
         new ProviderRole(name, group, priority, (int)placementPolicy, threshold,
-            placementTimeout, "", component);
+            placementTimeout, label, component);
 
     log.info("Created a new role " + newRole);
     return newRole;
   }
 
-  public synchronized void updateComponents(
-      Messages.FlexComponentRequestProto requestProto)
-      throws BadConfigException {
+  @VisibleForTesting
+  public synchronized List<ProviderRole> updateComponents(Map<String, Long>
+      componentCounts) throws BadConfigException {
     for (Component component : app.getComponents()) {
-      if (component.getName().equals(requestProto.getName())) {
-        component
-            .setNumberOfContainers((long) requestProto.getNumberOfContainers());
+      if (componentCounts.containsKey(component.getName())) {
+        component.setNumberOfContainers(componentCounts.get(component
+            .getName()));
       }
     }
     //TODO update cluster description
-    buildRoleRequirementsFromResources();
+    return buildRoleRequirementsFromResources();
+  }
+
+  public synchronized List<ProviderRole> updateComponents(
+      Messages.FlexComponentsRequestProto requestProto)
+      throws BadConfigException {
+    Map<String, Long> componentCounts = new HashMap<>();
+    for (ComponentCountProto componentCount : requestProto
+        .getComponentsList()) {
+      componentCounts.put(componentCount.getName(), componentCount
+          .getNumberOfContainers());
+    }
+    return updateComponents(componentCounts);
   }
 
   /**
@@ -445,10 +471,8 @@ public class AppState {
     // now the dynamic ones. Iterate through the cluster spec and
     // add any role status entries not in the role status
 
-    List<RoleStatus> list = new ArrayList<>(getRoleStatusMap().values());
-    for (RoleStatus roleStatus : list) {
-      String name = roleStatus.getName();
-      Component component = roleStatus.getProviderRole().component;
+    for (Component component : app.getComponents()) {
+      String name = component.getName();
       if (roles.containsKey(name)) {
         continue;
       }
@@ -460,10 +484,12 @@ public class AppState {
           groupCount = groupCounts.get(name);
         }
         for (int i = groupCount + 1; i <= desiredInstanceCount; i++) {
-          int priority = roleStatus.getPriority();
           // this is a new instance of an existing group
           String newName = String.format("%s%d", name, i);
-          int newPriority = getNewPriority(priority + i - 1);
+          if (roles.containsKey(newName)) {
+            continue;
+          }
+          int newPriority = getNewPriority(i);
           log.info("Adding new role {}", newName);
           ProviderRole dynamicRole =
               createComponent(newName, name, component, newPriority);
@@ -477,11 +503,12 @@ public class AppState {
         }
       } else {
         // this is a new value
-        log.info("Adding new role {}", name);
+        log.info("Adding new role {}, num containers {}", name,
+            component.getNumberOfContainers());
         ProviderRole dynamicRole =
-            createComponent(name, name, component, roleStatus.getPriority());
+            createComponent(name, name, component, getNewPriority(1));
         RoleStatus newRole = buildRole(dynamicRole);
-        incDesiredContainers(roleStatus,
+        incDesiredContainers(newRole,
             component.getNumberOfContainers().intValue());
         log.info("New role {}", newRole);
         if (roleHistory != null) {
@@ -518,7 +545,8 @@ public class AppState {
     if (roleStatusMap.containsKey(priority)) {
       throw new BadConfigException("Duplicate Provider Key: %s and %s",
                                    providerRole,
-                                   roleStatusMap.get(priority));
+                                   roleStatusMap.get(priority)
+                                       .getProviderRole());
     }
     RoleStatus roleStatus = new RoleStatus(providerRole);
     roleStatusMap.put(priority, roleStatus);
@@ -536,6 +564,8 @@ public class AppState {
   private void buildRoleResourceRequirements() {
     for (RoleStatus role : roleStatusMap.values()) {
       role.setResourceRequirements(buildResourceRequirements(role));
+      log.info("Setting resource requirements for {} to {}", role.getName(),
+          role.getResourceRequirements());
     }
   }
   /**
@@ -827,7 +857,6 @@ public class AppState {
    * @return the container request to submit or null if there is none
    */
   private AMRMClient.ContainerRequest createContainerRequest(RoleStatus role) {
-    incPendingContainers(role);
     if (role.isAntiAffinePlacement()) {
       return createAAContainerRequest(role);
     } else {
@@ -857,28 +886,58 @@ public class AppState {
     return request.getIssuedRequest();
   }
 
-  private void incPendingContainers(RoleStatus role) {
-    role.getComponentMetrics().containersPending.incr();
-    appMetrics.containersPending.incr();
+  @VisibleForTesting
+  public void incRequestedContainers(RoleStatus role) {
+    log.info("Incrementing requested containers for {}", role.getName());
+    role.getComponentMetrics().containersRequested.incr();
+    appMetrics.containersRequested.incr();
   }
 
-  private void decPendingContainers(RoleStatus role) {
-    decPendingContainers(role, 1);
+  private void decRequestedContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRequested.decr();
+    appMetrics.containersRequested.decr();
+    log.info("Decrementing requested containers for {} by {} to {}", role
+        .getName(), 1, role.getComponentMetrics().containersRequested.value());
   }
 
-  private void decPendingContainers(RoleStatus role, int n) {
-    role.getComponentMetrics().containersPending.decr(n);;
-    appMetrics.containersPending.decr(n);
+  private int decRequestedContainersToFloor(RoleStatus role, int delta) {
+    int actual = decMetricToFloor(role.getComponentMetrics()
+        .containersRequested, delta);
+    appMetrics.containersRequested.decr(actual);
+    log.info("Decrementing requested containers for {} by {} to {}", role
+            .getName(), actual, role.getComponentMetrics().containersRequested
+        .value());
+    return actual;
   }
 
+  private int decAAPendingToFloor(RoleStatus role, int delta) {
+    int actual = decMetricToFloor(role.getComponentMetrics()
+        .pendingAAContainers, delta);
+    appMetrics.pendingAAContainers.decr(actual);
+    log.info("Decrementing AA pending containers for {} by {} to {}", role
+        .getName(), actual, role.getComponentMetrics().pendingAAContainers
+        .value());
+    return actual;
+  }
 
-  private void incRunningContainers(RoleStatus role) {
-    role.getComponentMetrics().containersRunning.incr();;
+  private int decMetricToFloor(MutableGaugeInt metric, int delta) {
+    int currentValue = metric.value();
+    int decrAmount = delta;
+    if (currentValue - delta < 0) {
+      decrAmount = currentValue;
+    }
+    metric.decr(decrAmount);
+    return decrAmount;
+  }
+
+  @VisibleForTesting
+  public void incRunningContainers(RoleStatus role) {
+    role.getComponentMetrics().containersRunning.incr();
     appMetrics.containersRunning.incr();
   }
 
   private void decRunningContainers(RoleStatus role) {
-    role.getComponentMetrics().containersRunning.decr();;
+    role.getComponentMetrics().containersRunning.decr();
     appMetrics.containersRunning.decr();
   }
 
@@ -902,26 +961,47 @@ public class AppState {
     appMetrics.containersCompleted.incr();
   }
 
-  private void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
-    role.getComponentMetrics().containersFailed.incr();
-    appMetrics.containersFailed.incr();
+  @VisibleForTesting
+  public void incFailedContainers(RoleStatus role, ContainerOutcome outcome) {
     switch (outcome) {
     case Preempted:
       appMetrics.containersPreempted.incr();
       role.getComponentMetrics().containersPreempted.incr();
       break;
+    case Disk_failure:
+      appMetrics.containersDiskFailure.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().containersDiskFailure.incr();
+      role.getComponentMetrics().containersFailed.incr();
+      break;
     case Failed:
       appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
+      break;
+    case Failed_limits_exceeded:
+      appMetrics.containersLimitsExceeded.incr();
+      appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().containersLimitsExceeded.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
       break;
     default:
+      appMetrics.failedSinceLastThreshold.incr();
+      appMetrics.containersFailed.incr();
+      role.getComponentMetrics().failedSinceLastThreshold.incr();
+      role.getComponentMetrics().containersFailed.incr();
       break;
     }
   }
 
   /**
-   * Build up the resource requirements for this role from the
-   * cluster specification, including substituing max allowed values
-   * if the specification asked for it.
+   * Build up the resource requirements for this role from the cluster
+   * specification, including substituting max allowed values if the
+   * specification asked for it (except when
+   * {@link ResourceKeys#YARN_RESOURCE_NORMALIZATION_ENABLED} is set to false).
    * @param role role
    * during normalization
    */
@@ -934,17 +1014,36 @@ public class AppState {
       // TODO why do we need to create the component for AM ?
       return Resource.newInstance(1, 512);
     }
-    int cores = Math.min(containerMaxCores, component.getResource().getCpus());
+    int cores = DEF_YARN_CORES;
+    if (component.getResource() != null && component.getResource().getCpus()
+        != null) {
+      cores = Math.min(containerMaxCores, component.getResource().getCpus());
+    }
     if (cores <= 0) {
       cores = DEF_YARN_CORES;
     }
-    long mem = Math.min(containerMaxMemory,
-        Long.parseLong(component.getResource().getMemory()));
+    long rawMem = DEF_YARN_MEMORY;
+    if (component.getResource() != null && component.getResource().getMemory()
+        != null) {
+      if (YARN_RESOURCE_MAX.equals(component.getResource().getMemory())) {
+        rawMem = containerMaxMemory;
+      } else {
+        rawMem = Long.parseLong(component.getResource().getMemory());
+      }
+    }
+    boolean normalize = component.getConfiguration().getPropertyBool(
+        YARN_RESOURCE_NORMALIZATION_ENABLED, true);
+    if (!normalize) {
+      log.info("Resource normalization: disabled");
+      log.debug("Component {} has RAM={}, vCores={}", name, rawMem, cores);
+      return Resources.createResource(rawMem, cores);
+    }
+    long mem = Math.min(containerMaxMemory, rawMem);
     if (mem <= 0) {
       mem = DEF_YARN_MEMORY;
     }
     Resource capability = Resource.newInstance(mem, cores);
-    log.debug("Component {} has RAM={}, vCores ={}", name, mem, cores);
+    log.debug("Component {} has RAM={}, vCores={}", name, mem, cores);
     Resource normalized = recordFactory.normalize(capability, minResource,
         maxResource);
     if (!Resources.equals(normalized, capability)) {
@@ -1060,7 +1159,7 @@ public class AppState {
       log.debug("Created {} cancel requests", operations.size());
       return new NodeUpdatedOutcome(true, operations);
     }
-    return new NodeUpdatedOutcome(false, new ArrayList<AbstractRMOperation>(0));
+    return new NodeUpdatedOutcome(false, new ArrayList<>(0));
   }
 
   /**
@@ -1203,7 +1302,6 @@ public class AppState {
             message = String.format("Failure %s (%d)", containerId, exitStatus);
           }
           roleStatus.noteFailed(message);
-          incFailedContainers(roleStatus, result.outcome);
           long failed =
               roleStatus.getComponentMetrics().containersFailed.value();
           log.info("Current count of failed role[{}] {} =  {}",
@@ -1409,7 +1507,7 @@ public class AppState {
           role.getName(), failures, threshold);
     }
 
-    if (failures > threshold) {
+    if (threshold > 0 && failures > threshold) {
       throw new TriggerClusterTeardownException(
           SliderExitCodes.EXIT_DEPLOYMENT_FAILED, FinalApplicationStatus.FAILED,
           ErrorStrings.E_UNSTABLE_CLUSTER
@@ -1428,7 +1526,7 @@ public class AppState {
   private int getFailureThresholdForRole(RoleStatus roleStatus) {
     return (int) roleStatus.getProviderRole().component.getConfiguration()
         .getPropertyLong(CONTAINER_FAILURE_THRESHOLD,
-            DEFAULT_CONTAINER_FAILURE_THRESHOLD);
+            failureThreshold);
   }
 
 
@@ -1497,7 +1595,8 @@ public class AppState {
     }
 
     log.info("Reviewing {} : ", role);
-    log.debug("Expected {}, Delta: {}", expected, delta);
+    log.debug("Expected {}, Requested/Running {}, Delta: {}", expected,
+        role.getActualAndRequested(), delta);
     checkFailureThreshold(role);
 
     if (expected < 0 ) {
@@ -1526,7 +1625,7 @@ public class AppState {
               pending--;
               log.info("Starting an anti-affine request sequence for {} nodes; pending={}",
                 delta, pending);
-              addContainerRequest(operations, request);
+              addContainerRequest(operations, request, role);
             } else {
               log.info("No location for anti-affine request");
             }
@@ -1536,12 +1635,12 @@ public class AppState {
         }
         log.info("Setting pending to {}", pending);
         //TODO
-        role.setAAPending((int)pending);
+        role.setAAPending(pending);
       } else {
 
         for (int i = 0; i < delta; i++) {
           //get the role history to select a suitable node, if available
-          addContainerRequest(operations, createContainerRequest(role));
+          addContainerRequest(operations, createContainerRequest(role), role);
         }
       }
     } else if (delta < 0) {
@@ -1552,25 +1651,35 @@ public class AppState {
       long excess = -delta;
 
       // how many requests are outstanding? for AA roles, this includes pending
-      long outstandingRequests = role.getPending() + role.getAAPending();
+      long outstandingRequests = role.getRequested() + role.getAAPending();
       if (outstandingRequests > 0) {
         // outstanding requests.
         int toCancel = (int)Math.min(outstandingRequests, excess);
 
+        int pendingCancelled = 0;
+        if (role.getAAPending() > 0) {
+          pendingCancelled = decAAPendingToFloor(role, toCancel);
+        }
+        int remainingToCancel = toCancel - pendingCancelled;
+
         // Delegate to Role History
-        List<AbstractRMOperation> cancellations = roleHistory.cancelRequestsForRole(role, toCancel);
+        List<AbstractRMOperation> cancellations = roleHistory
+            .cancelRequestsForRole(role, remainingToCancel);
         log.info("Found {} outstanding requests to cancel", cancellations.size());
         operations.addAll(cancellations);
-        if (toCancel != cancellations.size()) {
+        if (remainingToCancel != cancellations.size()) {
           log.error("Tracking of outstanding requests is not in sync with the summary statistics:" +
               " expected to be able to cancel {} requests, but got {}",
-              toCancel, cancellations.size());
+              remainingToCancel, cancellations.size());
         }
-        decPendingContainers(role, toCancel);
-        excess -= toCancel;
+
+        int requestCancelled = decRequestedContainersToFloor(role,
+            remainingToCancel);
+        excess -= pendingCancelled;
+        excess -= requestCancelled;
         assert excess >= 0 : "Attempted to cancel too many requests";
         log.info("Submitted {} cancellations, leaving {} to release",
-            toCancel, excess);
+            pendingCancelled + requestCancelled, excess);
         if (excess == 0) {
           log.info("After cancelling requests, application is now at desired size");
         }
@@ -1645,7 +1754,7 @@ public class AppState {
    * @return true if a request was added
    */
   private boolean addContainerRequest(List<AbstractRMOperation> operations,
-      AMRMClient.ContainerRequest containerAsk) {
+      AMRMClient.ContainerRequest containerAsk, RoleStatus role) {
     if (containerAsk != null) {
       log.info("Container ask is {} and label = {}", containerAsk,
           containerAsk.getNodeLabelExpression());
@@ -1654,6 +1763,7 @@ public class AppState {
         log.warn("Memory requested: {} > max of {}", askMemory, containerMaxMemory);
       }
       operations.add(new ContainerRequestOperation(containerAsk));
+      incRequestedContainers(role);
       return true;
     } else {
       return false;
@@ -1727,6 +1837,8 @@ public class AppState {
       List<Container> allocatedContainers,
       List<ContainerAssignment> assignments,
       List<AbstractRMOperation> operations) {
+    assignments.clear();
+    operations.clear();
     List<Container> ordered = roleHistory.prepareAllocationList(allocatedContainers);
     log.info("onContainersAllocated(): Total containers allocated = {}", ordered.size());
     for (Container container : ordered) {
@@ -1735,13 +1847,13 @@ public class AppState {
       //get the role
       final ContainerId cid = container.getId();
       final RoleStatus role = lookupRoleStatus(container);
-      decPendingContainers(role);
+      decRequestedContainers(role);
 
       //inc allocated count - this may need to be dropped in a moment,
       // but is needed to update the logic below
       MutableGaugeInt containersRunning = role.getComponentMetrics().containersRunning;
-      final long allocated = containersRunning.value();
       incRunningContainers(role);
+      final long allocated = containersRunning.value();
       final long desired = role.getDesired();
 
       final String roleName = role.getName();
@@ -1778,7 +1890,8 @@ public class AppState {
           if (role.getAAPending() > 0) {
             // still an outstanding AA request: need to issue a new one.
             log.info("Asking for next container for AA role {}", roleName);
-            if (!addContainerRequest(operations, createAAContainerRequest(role))) {
+            if (!addContainerRequest(operations, createAAContainerRequest(role),
+                role)) {
               log.info("No capacity in cluster for new requests");
             } else {
               role.decAAPending();

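The new decMetricToFloor helper in the AppState changes above clamps a gauge decrement at zero and reports how much was actually subtracted; decRequestedContainersToFloor and decAAPendingToFloor rely on that return value to keep the excess-container arithmetic consistent while cancelling outstanding requests. A standalone sketch of the same clamping idea, using a plain holder in place of Hadoop's MutableGaugeInt (names are illustrative, not part of the patch):

    // Simplified stand-in for the metric type; not the Hadoop metrics2 class.
    static final class Gauge {
      private int value;
      int value()          { return value; }
      void decr(int delta) { value -= delta; }
    }

    // Decrement by at most `delta`, never below zero; return what was removed
    // so the caller can adjust its own bookkeeping by the real amount.
    static int decToFloor(Gauge metric, int delta) {
      int actual = Math.min(metric.value(), delta);
      metric.decr(actual);
      return actual;
    }

    // Example: cancelling 5 requests when only 3 are outstanding removes 3,
    // and the caller subtracts 3 (not 5) from the remaining excess.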
