Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2017/06/21 18:33:55 UTC

[33/50] [abbrv] hadoop git commit: YARN-6400. Remove some unneeded code after YARN-6255. Contributed by Jian He

YARN-6400. Remove some unneeded code after YARN-6255. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c82f36c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c82f36c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c82f36c

Branch: refs/heads/yarn-native-services
Commit: 4c82f36c78ca12217dfafbf26e49704357c05b7a
Parents: 665e3fc
Author: Jian He <ji...@apache.org>
Authored: Thu Mar 30 16:07:18 2017 +0800
Committer: Jian He <ji...@apache.org>
Committed: Wed Jun 21 11:32:53 2017 -0700

----------------------------------------------------------------------
 .../apache/slider/api/ClusterDescription.java   | 795 -------------------
 .../slider/api/ClusterDescriptionKeys.java      |  25 -
 .../api/ClusterDescriptionOperations.java       |  93 ---
 .../apache/slider/api/SliderApplicationApi.java | 159 ----
 .../slider/api/SliderClusterProtocol.java       |  14 -
 .../types/ApplicationLivenessInformation.java   |   3 -
 .../slider/api/types/RestTypeMarshalling.java   |  27 -
 .../org/apache/slider/client/SliderClient.java  | 437 +++-------
 .../client/ipc/SliderApplicationIpcClient.java  | 234 ------
 .../client/ipc/SliderClusterOperations.java     | 127 +--
 .../slider/client/rest/RestClientFactory.java   |  89 ---
 .../rest/SliderApplicationApiRestClient.java    | 289 -------
 .../AbstractClusterBuildingActionArgs.java      |  11 -
 .../slider/common/tools/CoreFileSystem.java     |  74 --
 .../apache/slider/common/tools/SliderUtils.java | 116 ---
 .../slider/core/buildutils/InstanceBuilder.java | 520 ------------
 .../slider/core/buildutils/InstanceIO.java      |  83 --
 .../conf/AbstractInputPropertiesValidator.java  |  49 --
 .../apache/slider/core/conf/AggregateConf.java  | 198 -----
 .../org/apache/slider/core/conf/ConfTree.java   | 101 ---
 .../slider/core/conf/ConfTreeOperations.java    | 527 ------------
 .../core/conf/InputPropertiesValidator.java     |  27 -
 .../conf/ResourcesInputPropertiesValidator.java |  41 -
 .../conf/TemplateInputPropertiesValidator.java  |  38 -
 .../slider/core/launch/AbstractLauncher.java    |  55 --
 .../core/persist/AggregateConfSerDeser.java     |  55 --
 .../slider/core/persist/ConfPersister.java      | 286 -------
 .../slider/core/persist/ConfTreeSerDeser.java   |  54 --
 .../persist/LockAcquireFailedException.java     |  40 -
 .../providers/AbstractClientProvider.java       | 167 +---
 .../slider/providers/ProviderService.java       |  11 -
 .../providers/docker/DockerClientProvider.java  |  18 -
 .../server/appmaster/SliderAppMaster.java       |  87 +-
 .../appmaster/actions/ActionFlexCluster.java    |   1 -
 .../rpc/SliderClusterProtocolPBImpl.java        |  69 --
 .../rpc/SliderClusterProtocolProxy.java         |  63 --
 .../server/appmaster/rpc/SliderIPCService.java  |  68 --
 .../security/SecurityConfiguration.java         | 237 +++---
 .../slider/server/appmaster/state/AppState.java |  63 --
 .../appmaster/web/rest/AMWebServices.java       |   9 +-
 .../resources/AggregateModelRefresher.java      |  43 -
 .../application/resources/AppconfRefresher.java |  52 --
 .../resources/LiveComponentsRefresher.java      |  39 -
 .../resources/LiveContainersRefresher.java      |  52 --
 .../resources/LiveNodesRefresher.java           |  41 -
 .../web/rest/management/ManagementResource.java |  94 ---
 .../management/resources/ActionsResource.java   |  22 -
 .../resources/AggregateConfResource.java        |  90 ---
 .../management/resources/ComponentResource.java |  53 --
 .../management/resources/ConfTreeResource.java  |  69 --
 .../management/resources/ResourceFactory.java   |  47 --
 .../web/rest/publisher/PublisherResource.java   |   7 +-
 .../src/main/proto/SliderClusterProtocol.proto  |  28 -
 53 files changed, 239 insertions(+), 5758 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
deleted file mode 100644
index f8e5e7c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
+++ /dev/null
@@ -1,795 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.slider.api;
-
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.exceptions.BadConfigException;
-import org.apache.slider.providers.SliderProviderFactory;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.annotate.JsonIgnore;
-import org.codehaus.jackson.annotate.JsonIgnoreProperties;
-import org.codehaus.jackson.map.JsonMappingException;
-import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.SerializationConfig;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_HOME;
-import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH;
-import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH;
-import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM;
-
-/**
- * Represents a cluster specification; designed to be sendable over the wire
- * and persisted in JSON by way of Jackson.
- * 
- * When used in cluster status operations the <code>info</code>
- * and <code>statistics</code> maps contain information about the cluster.
- * 
- * As a wire format it is less efficient in both xfer and ser/deser than 
- * a binary format, but by having one unified format for wire and persistence,
- * the code paths are simplified.
- *
- * This was the original single-file specification/model used in the Hoya
- * precursor to Slider. Its now retained primarily as a way to publish
- * the current state of the application, or at least a fraction thereof ...
- * the larger set of information from the REST API is beyond the scope of
- * this structure.
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-
-public class ClusterDescription implements Cloneable {
-  protected static final Logger
-    log = LoggerFactory.getLogger(ClusterDescription.class);
-
-  private static final String UTF_8 = "UTF-8";
-
-  /**
-   * version counter
-   */
-  public String version = "1.0";
-
-  /**
-   * Name of the cluster
-   */
-  public String name;
-
-  /**
-   * Type of cluster
-   */
-  public String type = SliderProviderFactory.DEFAULT_CLUSTER_TYPE;
-
-  /**
-   * State of the cluster
-   */
-  public int state;
-  
-  /*
-   State list for both clusters and nodes in them. Ordered so that destroyed follows
-   stopped.
-   
-   Some of the states are only used for recording
-   the persistent state of the cluster and are not
-   seen in node descriptions
-   */
-
-  /**
-   * Specification is incomplete & cannot
-   * be used: {@value}
-   */
-  public static final int STATE_INCOMPLETE = StateValues.STATE_INCOMPLETE;
-
-  /**
-   * Spec has been submitted: {@value}
-   */
-  public static final int STATE_SUBMITTED = StateValues.STATE_SUBMITTED;
-  /**
-   * Cluster created: {@value}
-   */
-  public static final int STATE_CREATED = StateValues.STATE_CREATED;
-  /**
-   * Live: {@value}
-   */
-  public static final int STATE_LIVE = StateValues.STATE_LIVE;
-  /**
-   * Stopped
-   */
-  public static final int STATE_STOPPED = StateValues.STATE_STOPPED;
-  /**
-   * destroyed
-   */
-  public static final int STATE_DESTROYED = StateValues.STATE_DESTROYED;
-  
-  /**
-   * When was the cluster specification created?
-   * This is not the time a cluster was thawed; that will
-   * be in the <code>info</code> section.
-   */
-  public long createTime;
-
-  /**
-   * When was the cluster specification last updated
-   */
-  public long updateTime;
-
-  /**
-   * URL path to the original configuration
-   * files; these are re-read when 
-   * restoring a cluster
-   */
-
-  public String originConfigurationPath;
-
-  /**
-   * URL path to the generated configuration
-   */
-  public String generatedConfigurationPath;
-
-  /**
-   * This is where the data goes
-   */
-  public String dataPath;
-
-  /**
-   * cluster-specific options -to control both
-   * the Slider AM and the application that it deploys
-   */
-  public Map<String, String> options = new HashMap<>();
-
-  /**
-   * cluster information
-   * This is only valid when querying the cluster status.
-   */
-  public Map<String, String> info = new HashMap<>();
-
-  /**
-   * Statistics. This is only relevant when querying the cluster status
-   */
-  public Map<String, Map<String, Integer>> statistics = new HashMap<>();
-
-  /**
-   * Instances: role->count
-   */
-  public Map<String, List<String>> instances = new HashMap<>();
-
-  /**
-   * Role options, 
-   * role -> option -> value
-   */
-  public Map<String, Map<String, String>> roles = new HashMap<>();
-
-
-  /**
-   * List of key-value pairs to add to a client config to set up the client
-   */
-  public Map<String, String> clientProperties = new HashMap<>();
-
-  /**
-   * Status information
-   */
-  public Map<String, Object> status;
-
-  /**
-   * Liveness information; the same as returned
-   * on the <code>live/liveness/</code> URL
-   */
-  public ApplicationLivenessInformation liveness;
-
-  /**
-   * Creator.
-   */
-  public ClusterDescription() {
-  }
-
-  @Override
-  public String toString() {
-    try {
-      return toJsonString();
-    } catch (Exception e) {
-      log.debug("Failed to convert CD to JSON ", e);
-      return super.toString();
-    }
-  }
-
-  /**
-   * Shallow clone
-   * @return a shallow clone
-   * @throws CloneNotSupportedException
-   */
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    return super.clone();
-  }
-
-  /**
-   * A deep clone of the spec. This is done inefficiently with a ser/derser
-   * @return the cluster description
-   */
-  public ClusterDescription deepClone() {
-    try {
-      return fromJson(toJsonString());
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-
-  /**
-   * Save a cluster description to a hadoop filesystem
-   * @param fs filesystem
-   * @param path path
-   * @param overwrite should any existing file be overwritten
-   * @throws IOException IO exception
-   */
-  public void save(FileSystem fs, Path path, boolean overwrite) throws
-                                                                IOException {
-    FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
-    writeJsonAsBytes(dataOutputStream);
-  }
-  
-  /**
-   * Save a cluster description to the local filesystem
-   * @param file file
-   * @throws IOException IO excpetion
-   */
-  public void save(File file) throws IOException {
-    log.debug("Saving to {}", file.getAbsolutePath());
-    if (!file.getParentFile().mkdirs()) {
-      log.warn("Failed to mkdirs for {}", file.getParentFile());
-    }
-    DataOutputStream dataOutputStream = new DataOutputStream(new FileOutputStream(file));
-    writeJsonAsBytes(dataOutputStream);
-  }
-
-  /**
-   * Write the json as bytes -then close the file
-   * @param dataOutputStream an outout stream that will always be closed
-   * @throws IOException any failure
-   */
-  private void writeJsonAsBytes(DataOutputStream dataOutputStream)
-      throws IOException {
-    try {
-      String json = toJsonString();
-      byte[] b = json.getBytes(UTF_8);
-      dataOutputStream.write(b);
-    } finally {
-      dataOutputStream.close();
-    }
-  }
-
-  /**
-   * Load from the filesystem
-   * @param fs filesystem
-   * @param path path
-   * @return a loaded CD
-   * @throws IOException IO problems
-   */
-  public static ClusterDescription load(FileSystem fs, Path path)
-      throws IOException, JsonParseException, JsonMappingException {
-    FileStatus status = fs.getFileStatus(path);
-    byte[] b = new byte[(int) status.getLen()];
-    FSDataInputStream dataInputStream = fs.open(path);
-    int count = dataInputStream.read(b);
-    String json = new String(b, 0, count, UTF_8);
-    return fromJson(json);
-  }
-
-  /**
-   * Make a deep copy of the class
-   * @param source source
-   * @return the copy
-   */
-  public static ClusterDescription copy(ClusterDescription source) {
-    //currently the copy is done by a generate/save. Inefficient but it goes
-    //down the tree nicely
-    try {
-      return fromJson(source.toJsonString());
-    } catch (IOException e) {
-      throw new RuntimeException("ClusterDescription copy failed " + e, e);
-    }
-  }
-
-  /**
-   * Convert to a JSON string
-   * @return a JSON string description
-   * @throws IOException Problems mapping/writing the object
-   */
-  public String toJsonString() throws IOException,
-                                      JsonGenerationException,
-                                      JsonMappingException {
-    ObjectMapper mapper = new ObjectMapper();
-    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
-    return mapper.writeValueAsString(this);
-  }
-
-  /**
-   * Convert from JSON
-   * @param json input
-   * @return the parsed JSON
-   * @throws IOException IO
-   * @throws JsonMappingException failure to map from the JSON to this class
-   */
-  public static ClusterDescription fromJson(String json)
-    throws IOException, JsonParseException, JsonMappingException {
-    ObjectMapper mapper = new ObjectMapper();
-    try {
-      return mapper.readValue(json, ClusterDescription.class);
-    } catch (IOException e) {
-      log.error("Exception while parsing json : " + e + "\n" + json, e);
-      throw e;
-    }
-  }
-
-    /**
-     * Convert from input stream
-     * @param is input stream of cluster description
-     * @return the parsed JSON
-     * @throws IOException IO
-     * @throws JsonMappingException failure to map from the JSON to this class
-     */
-    public static ClusterDescription fromStream(InputStream is)
-            throws IOException, JsonParseException, JsonMappingException {
-        if (is==null) {
-          throw new FileNotFoundException("Empty Stream");
-        }
-        ObjectMapper mapper = new ObjectMapper();
-        try {
-            return mapper.readValue(is, ClusterDescription.class);
-        } catch (IOException e) {
-            log.error("Exception while parsing input stream : {}", e, e);
-      throw e;
-    }
-  }
-
-  /**
-   * Convert from a JSON file
-   * @param jsonFile input file
-   * @return the parsed JSON
-   * @throws IOException IO problems
-   * @throws JsonMappingException failure to map from the JSON to this class
-   */
-  public static ClusterDescription fromFile(File jsonFile)
-    throws IOException, JsonParseException, JsonMappingException {
-    ObjectMapper mapper = new ObjectMapper();
-    try {
-      return mapper.readValue(jsonFile, ClusterDescription.class);
-    } catch (IOException e) {
-      log.error("Exception while parsing json file {}" , jsonFile, e);
-      throw e;
-    }
-  }
-
-  /**
-   * Set a cluster option: a key val pair in the options {} section
-   * @param key key option name
-   * @param val value option value
-   */
-  public void setOption(String key, String val) {
-    options.put(key, val);
-  }
-
-  /**
-   * Set a cluster option if it is unset. If it is already set,
-   * in the Cluster Description, it is left alone
-   * @param key key key to query/set
-   * @param val value value
-   */
-
-  public void setOptionifUnset(String key, String val) {
-    if (options.get(key) == null) {
-      options.put(key, val);
-    }
-  }
-
-  /**
-   * Set an integer option -it's converted to a string before saving
-   * @param option option name
-   * @param val integer value
-   */
-  public void setOption(String option, int val) {
-    setOption(option, Integer.toString(val));
-  }
-
-  /**
-   * Set a boolean option
-   * @param option option name
-   * @param val bool value
-   */
-  public void setOption(String option, boolean val) {
-    setOption(option, Boolean.toString(val));
-  }
-
-  /**
-   * Get a cluster option or value
-   *
-   * @param key option key
-   * @param defVal option val
-   * @return resolved value or default
-   */
-  public String getOption(String key, String defVal) {
-    String val = options.get(key);
-    return val != null ? val : defVal;
-  }
-
-  /**
-   * Get a cluster option or value
-   *
-   * @param key mandatory key
-   * @return the value
-   * @throws BadConfigException if the option is missing
-   */
-  public String getMandatoryOption(String key) throws BadConfigException {
-    String val = options.get(key);
-    if (val == null) {
-      throw new BadConfigException("Missing option " + key);
-    }
-    return val;
-  }
-
-  /**
-   * Get an integer option; use {@link Integer#decode(String)} so as to take hex
-   * oct and bin values too.
-   *
-   * @param option option name
-   * @param defVal default value
-   * @return parsed value
-   * @throws NumberFormatException if the role could not be parsed.
-   */
-  public int getOptionInt(String option, int defVal) {
-    String val = getOption(option, Integer.toString(defVal));
-    return Integer.decode(val);
-  }
-
-  /**
-   * Verify that an option is set: that is defined AND non-empty
-   * @param key key to verify
-   * @throws BadConfigException
-   */
-  public void verifyOptionSet(String key) throws BadConfigException {
-    if (SliderUtils.isUnset(getOption(key, null))) {
-      throw new BadConfigException("Unset cluster option %s", key);
-    }
-  }
-
-  /**
-   * Get an option as a boolean. Note that {@link Boolean#valueOf(String)}
-   * is used for parsing -its policy of what is true vs false applies.
-   * @param option name
-   * @param defVal default
-   * @return the option.
-   */
-  public boolean getOptionBool(String option, boolean defVal) {
-    return Boolean.valueOf(getOption(option, Boolean.toString(defVal)));
-  }
-
-  /**
-   * Get a role option
-   * @param role role to get from
-   * @param option option name
-   * @param defVal default value
-   * @return resolved value
-   */
-  public String getRoleOpt(String role, String option, String defVal) {
-    Map<String, String> roleopts = getRole(role);
-    if (roleopts == null) {
-      return defVal;
-    }
-    String val = roleopts.get(option);
-    return val != null ? val : defVal;
-  }
-
-  /**
-   * Get a mandatory role option
-   * @param role role to get from
-   * @param option option name
-   * @return resolved value
-   * @throws BadConfigException if the option is not defined
-   */
-  public String getMandatoryRoleOpt(String role, String option) throws
-                                                                BadConfigException {
-    Map<String, String> roleopts = getRole(role);
-    if (roleopts == null) {
-      throw new BadConfigException("Missing role %s ", role);
-    }
-    String val = roleopts.get(option);
-    if (val == null) {
-      throw new BadConfigException("Missing option '%s' in role %s ", option, role);
-    }
-    return val;
-  }
-
-  /**
-   * Get a mandatory integer role option
-   * @param role role to get from
-   * @param option option name
-   * @return resolved value
-   * @throws BadConfigException if the option is not defined
-   */
-  public int getMandatoryRoleOptInt(String role, String option)
-      throws BadConfigException {
-    getMandatoryRoleOpt(role, option);
-    return getRoleOptInt(role, option, 0);
-  }
-  
-  /**
-   * look up a role and return its options
-   * @param role role
-   * @return role mapping or null
-   */
-  public Map<String, String> getRole(String role) {
-    return roles.get(role);
-  }
-
-  /**
-   * Get a role -adding it to the roleopts map if
-   * none with that name exists
-   * @param role role
-   * @return role mapping
-   */
-  public Map<String, String> getOrAddRole(String role) {
-    Map<String, String> map = getRole(role);
-    if (map == null) {
-      map = new HashMap<>();
-    }
-    roles.put(role, map);
-    return map;
-  }
-  
-  /*
-   * return the Set of role names
-   */
-  @JsonIgnore
-  public Set<String> getRoleNames() {
-    return new HashSet<>(roles.keySet());
-  }
-
-  /**
-   * Get a role whose presence is mandatory
-   * @param role role name
-   * @return the mapping
-   * @throws BadConfigException if the role is not there
-   */
-  public Map<String, String> getMandatoryRole(String role) throws
-                                                           BadConfigException {
-    Map<String, String> roleOptions = getRole(role);
-    if (roleOptions == null) {
-      throw new BadConfigException("Missing role " + role);
-    }
-    return roleOptions;
-  }
-
-  /**
-   * Get an integer role option; use {@link Integer#decode(String)} so as to take hex
-   * oct and bin values too.
-   *
-   * @param role role to get from
-   * @param option option name
-   * @param defVal default value
-   * @return parsed value
-   * @throws NumberFormatException if the role could not be parsed.
-   */
-  public int getRoleOptInt(String role, String option, int defVal) {
-    String val = getRoleOpt(role, option, Integer.toString(defVal));
-    return Integer.decode(val);
-  }
-
-  /**
-   * Get an integer role option; use {@link Integer#decode(String)} so as to take hex
-   * oct and bin values too.
-   *
-   * @param role role to get from
-   * @param option option name
-   * @param defVal default value
-   * @return parsed value
-   * @throws NumberFormatException if the role could not be parsed.
-   */
-  public long getRoleOptLong(String role, String option, long defVal) {
-    String val = getRoleOpt(role, option, Long.toString(defVal));
-    return Long.decode(val);
-  }
-
-  /**
-   * Set a role option, creating the role if necessary
-   * @param role role name
-   * @param option option name
-   * @param val value
-   */
-  public void setRoleOpt(String role, String option, String val) {
-    Map<String, String> roleopts = getOrAddRole(role);
-    roleopts.put(option, val);
-  }
-
-  /**
-   * Set an integer role option, creating the role if necessary
-   * @param role role name
-   * @param option option name
-   * @param val integer value
-   */
-  public void setRoleOpt(String role, String option, int val) {
-    setRoleOpt(role, option, Integer.toString(val));
-  }
-
-  /**
-   * Set a role option of any object, using its string value.
-   * This works for (Boxed) numeric values as well as other objects
-   * @param role role name
-   * @param option option name
-   * @param val non-null value
-   */
-  public void setRoleOpt(String role, String option, Object val) {
-    setRoleOpt(role, option, val.toString());
-  }
-
-  /**
-   * Get the value of a role requirement (cores, RAM, etc).
-   * These are returned as integers, but there is special handling of the 
-   * string {@link ResourceKeys#YARN_RESOURCE_MAX}, which triggers
-   * the return of the maximum value.
-   * @param role role to get from
-   * @param option option name
-   * @param defVal default value
-   * @param maxVal value to return if the max val is requested
-   * @return parsed value
-   * @throws NumberFormatException if the role could not be parsed.
-   */
-  public int getRoleResourceRequirement(String role, String option, int defVal, int maxVal) {
-    String val = getRoleOpt(role, option, Integer.toString(defVal));
-    Integer intVal;
-    if (ResourceKeys.YARN_RESOURCE_MAX.equals(val)) {
-      intVal = maxVal;
-    } else {
-      intVal = Integer.decode(val);
-    }
-    return intVal;
-  }
-
-
-  /**
-   * Set the time for an information (human, machine) timestamp pair of fields.
-   * The human time is the time in millis converted via the {@link Date} class.
-   * @param keyHumanTime name of human time key
-   * @param keyMachineTime name of machine time
-   * @param time timestamp
-   */
-  
-  public void setInfoTime(String keyHumanTime, String keyMachineTime, long time) {
-    SliderUtils.setInfoTime(info, keyHumanTime, keyMachineTime, time);
-  }
-
-  /**
-   * Set an information string. This is content that is only valid in status
-   * reports.
-   * @param key key
-   * @param value string value
-   */
-  @JsonIgnore
-  public void setInfo(String key, String value) {
-    info.put(key, value);
-  }
-
-  /**
-   * Get an information string. This is content that is only valid in status
-   * reports.
-   * @param key key
-   * @return the value or null
-   */
-  @JsonIgnore
-  public String getInfo(String key) {
-    return info.get(key);
-  }
-
-  /**
-   * Get an information string. This is content that is only valid in status
-   * reports.
-   * @param key key
-   * @return the value or null
-   */
-  @JsonIgnore
-  public boolean getInfoBool(String key) {
-    String val = info.get(key);
-    if (val != null) {
-      return Boolean.valueOf(val);
-    }
-    return false;
-  }
-
-  @JsonIgnore
-  public String getZkHosts() throws BadConfigException {
-    return getMandatoryOption(ZOOKEEPER_QUORUM);
-  }
-
-  /**
-   * Set the hosts for the ZK quorum
-   * @param zkHosts a comma separated list of hosts
-   */
-  @JsonIgnore
-  public void setZkHosts(String zkHosts) {
-    setOption(ZOOKEEPER_QUORUM, zkHosts);
-  }
-
-  @JsonIgnore
-  public String getZkPath() throws BadConfigException {
-    return getMandatoryOption(ZOOKEEPER_PATH);
-  }
-
-  @JsonIgnore
-  public void setZkPath(String zkPath) {
-    setOption(ZOOKEEPER_PATH, zkPath);
-  }
-
-  /**
-   * HBase home: if non-empty defines where a copy of HBase is preinstalled
-   */
-  @JsonIgnore
-  public String getApplicationHome() {
-    return getOption(INTERNAL_APPLICATION_HOME, "");
-  }
-
-  @JsonIgnore
-  public void setApplicationHome(String applicationHome) {
-    setOption(INTERNAL_APPLICATION_HOME, applicationHome);
-  }
-
-  /**
-   * The path in HDFS where the HBase image is
-   */
-  @JsonIgnore
-  public String getImagePath() {
-    return getOption(INTERNAL_APPLICATION_IMAGE_PATH, "");
-  }
-
-  /**
-   * Set the path in HDFS where the HBase image is
-   */
-  @JsonIgnore
-  public void setImagePath(String imagePath) {
-    setOption(INTERNAL_APPLICATION_IMAGE_PATH, imagePath);
-  }
-
-  /**
-   * Query for the image path being set (non null/non empty)
-   * @return true if there is a path in the image path option
-   */
-  @JsonIgnore
-  public boolean isImagePathSet() {
-    return SliderUtils.isSet(getImagePath());
-  }
-}
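
[Editor's note, not part of the commit: the deleted class above was both the wire format and the persisted model, and its public surface amounted to plain fields, option/role helper methods, and Jackson round-tripping. A minimal usage sketch of that removed API, assembled only from methods visible in the diff above (the option and role keys are placeholders, and the class name is invented for illustration), would have looked like:

import org.apache.slider.api.ClusterDescription;

public class ClusterDescriptionDemo {
  public static void main(String[] args) throws Exception {
    ClusterDescription cd = new ClusterDescription();
    cd.name = "demo-cluster";                         // public fields, not bean setters
    cd.state = ClusterDescription.STATE_LIVE;
    cd.setOption("example.option", "value");          // lands in the options {} map
    cd.setRoleOpt("worker", "example.memory", 1024);  // creates the "worker" role map if absent

    String json = cd.toJsonString();                  // indented Jackson output
    ClusterDescription copy = ClusterDescription.fromJson(json);
    System.out.println(copy.getRoleOpt("worker", "example.memory", "0"));
  }
}
]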

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
deleted file mode 100644
index 5b7a92a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionKeys.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.api;
-
-public class ClusterDescriptionKeys {
-
-  public static final String KEY_CLUSTER_LIVE = "live"; 
-  public static final String KEY_CLUSTER_FAILED = "failed"; 
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
deleted file mode 100644
index 5b95414..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescriptionOperations.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.api;
-
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.exceptions.BadConfigException;
-import org.apache.slider.providers.SliderProviderFactory;
-
-import java.util.Map;
-
-import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH;
-import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM;
-
-/**
- * Operations on Cluster Descriptions
- */
-public class ClusterDescriptionOperations {
-
-
-  public static ClusterDescription buildFromInstanceDefinition(AggregateConf aggregateConf) throws
-                                                                                       BadConfigException {
-
-    ClusterDescription cd = new ClusterDescription();
-    
-    aggregateConf.resolve();
-
-    //options are a merge of all globals
-    Map<String, String> options = cd.options;
-    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
-        aggregateConf.getInternal().global);
-    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
-        aggregateConf.getAppConf().global);
-    SliderUtils.mergeMapsIgnoreDuplicateKeys(options,
-        aggregateConf.getResources().global);
-
-    //roles are the role values merged in the same order
-    mergeInComponentMap(cd, aggregateConf.getInternal());
-    mergeInComponentMap(cd, aggregateConf.getAppConf());
-    mergeInComponentMap(cd, aggregateConf.getResources());
-
-    //now add the extra bits
-    cd.state = ClusterDescription.STATE_LIVE;
-    MapOperations internalOptions =
-      aggregateConf.getInternalOperations().getGlobalOptions();
-    MapOperations appOptions =
-      aggregateConf.getAppConfOperations().getGlobalOptions();
-
-    cd.type = internalOptions.getOption(InternalKeys.INTERNAL_PROVIDER_NAME,
-                                SliderProviderFactory.DEFAULT_CLUSTER_TYPE);
-
-    cd.dataPath = internalOptions.get(InternalKeys.INTERNAL_DATA_DIR_PATH);
-    cd.name = internalOptions.get(OptionKeys.APPLICATION_NAME);
-    cd.originConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_SNAPSHOT_CONF_PATH);
-    cd.generatedConfigurationPath = internalOptions.get(InternalKeys.INTERNAL_GENERATED_CONF_PATH);
-    cd.setImagePath(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH));
-    cd.setApplicationHome(internalOptions.get(InternalKeys.INTERNAL_APPLICATION_HOME));
-    cd.setZkPath(appOptions.get(ZOOKEEPER_PATH));
-    cd.setZkHosts(appOptions.get(ZOOKEEPER_QUORUM));
-    
-    return cd;
-  }
-
-  private static void mergeInComponentMap(ClusterDescription cd,
-                                          ConfTree confTree) {
-
-    Map<String, Map<String, String>> components = confTree.components;
-    for (Map.Entry<String, Map<String, String>> compEntry : components.entrySet()) {
-      String name = compEntry.getKey();
-      Map<String, String> destRole = cd.getOrAddRole(name);
-      Map<String, String> sourceComponent = compEntry.getValue();
-      SliderUtils.mergeMapsIgnoreDuplicateKeys(destRole, sourceComponent);
-    }
-  }
-}
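
[Editor's note, not part of the commit: the class removed above was the bridge from the three-part AggregateConf model to the flat legacy ClusterDescription view. A schematic of the one call pattern it served is below; how the AggregateConf is obtained is out of scope here (before this commit it was typically loaded via ConfPersister, also deleted in this change), and the wrapper class name is invented for illustration.

import org.apache.slider.api.ClusterDescription;
import org.apache.slider.api.ClusterDescriptionOperations;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.core.exceptions.BadConfigException;

public class LegacyStatusView {
  /**
   * Flatten an already-loaded instance definition into the legacy
   * single-file ClusterDescription JSON form.
   */
  public static String toLegacyJson(AggregateConf instanceDefinition)
      throws BadConfigException, java.io.IOException {
    ClusterDescription cd =
        ClusterDescriptionOperations.buildFromInstanceDefinition(instanceDefinition);
    return cd.toJsonString();
  }
}
]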

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
deleted file mode 100644
index f6a2cc9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderApplicationApi.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.api;
-
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.api.types.ContainerInformation;
-import org.apache.slider.api.types.NodeInformation;
-import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.api.types.PingInformation;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * API exported by the slider remote REST/IPC endpoints.
- */
-public interface SliderApplicationApi {
-  /**
-   * Get the aggregate desired model
-   * @return the aggregate configuration of what was asked for
-   * -before resolution has taken place
-   * @throws IOException on any failure
-   */
-  AggregateConf getDesiredModel() throws IOException;
-
-  /**
-   * Get the desired application configuration
-   * @return the application configuration asked for
-   * -before resolution has taken place
-   * @throws IOException on any failure
-   */
-  ConfTreeOperations getDesiredAppconf() throws IOException;
-
-  /**
-   * Get the desired YARN resources
-   * @return the resources asked for
-   * -before resolution has taken place
-   * @throws IOException on any failure
-   */
-  ConfTreeOperations getDesiredResources() throws IOException;
-
-  /**
-   * Get the aggregate resolved model
-   * @return the aggregate configuration of what was asked for
-   * -after resolution has taken place
-   * @throws IOException on any failure
-   */
-  AggregateConf getResolvedModel() throws IOException;
-
-  /**
-   * Get the resolved application configuration
-   * @return the application configuration asked for
-   * -after resolution has taken place
-   * @throws IOException on any failure
-   */
-  ConfTreeOperations getResolvedAppconf() throws IOException;
-
-  /**
-   * Get the resolved YARN resources
-   * @return the resources asked for
-   * -after resolution has taken place
-   * @throws IOException on any failure
-   */
-  ConfTreeOperations getResolvedResources() throws IOException;
-
-  /**
-   * Get the live YARN resources
-   * @return the live set of resources in the cluster
-   * @throws IOException on any failure
-   */
-  ConfTreeOperations getLiveResources() throws IOException;
-
-  /**
-   * Get a map of live containers [containerId:info]
-   * @return a possibly empty list of serialized containers
-   * @throws IOException on any failure
-   */
-  Map<String, ContainerInformation> enumContainers() throws IOException;
-
-  /**
-   * Get a container from the container Id
-   * @param containerId YARN container ID
-   * @return the container information
-   * @throws IOException on any failure
-   */
-  ContainerInformation getContainer(String containerId) throws IOException;
-
-  /**
-   * List all components into a map of [name:info]
-   * @return a possibly empty map of components
-   * @throws IOException on any failure
-   */
-  Map<String, ComponentInformation> enumComponents() throws IOException;
-
-  /**
-   * Get information about a component
-   * @param componentName name of the component
-   * @return the component details
-   * @throws IOException on any failure
-   */
-  ComponentInformation getComponent(String componentName) throws IOException;
-
-  /**
-   * List all nodes into a map of [name:info]
-   * @return a possibly empty list of nodes
-   * @throws IOException on any failure
-   */
-  NodeInformationList getLiveNodes() throws IOException;
-
-  /**
-   * Get information about a node
-   * @param hostname name of the node
-   * @return the node details
-   * @throws IOException on any failure
-   */
-  NodeInformation getLiveNode(String hostname) throws IOException;
-
-  /**
-   * Ping as a GET
-   * @param text text to include
-   * @return the response
-   * @throws IOException on any failure
-   */
-  PingInformation ping(String text) throws IOException;
-
-  /**
-   * Stop the AM (async operation)
-   * @param text text to include
-   * @throws IOException on any failure
-   */
-  void stop(String text) throws IOException;
-
-  /**
-   * Get the application liveness
-   * @return current liveness information
-   * @throws IOException
-   */
-  ApplicationLivenessInformation getApplicationLiveness() throws IOException;
-}
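
[Editor's note, not part of the commit: the interface removed above was the shared facade over both the REST and IPC clients, and every method on it except stop() is a side-effect-free getter. A schematic read-only caller, using only methods and types visible in the diff above (the class name and plumbing are invented for illustration; acquiring the SliderApplicationApi instance came from the IPC/REST client classes removed in the same change), might have looked like:

import org.apache.slider.api.SliderApplicationApi;
import org.apache.slider.api.types.ContainerInformation;

import java.io.IOException;
import java.util.Map;

public class ApiProbe {
  /** Print liveness plus the live container set from a connected API handle. */
  public static void dump(SliderApplicationApi api) throws IOException {
    System.out.println("outstanding requests: "
        + api.getApplicationLiveness().requestsOutstanding);
    Map<String, ContainerInformation> containers = api.enumContainers();
    for (Map.Entry<String, ContainerInformation> entry : containers.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}
]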

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
index f384927..7f768b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/SliderClusterProtocol.java
@@ -149,18 +149,4 @@ public interface SliderClusterProtocol extends VersionedProtocol {
   Messages.NodeInformationProto getLiveNode(
       Messages.GetLiveNodeRequestProto request
   ) throws IOException;
-
-  Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException;
-
-  Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
index 9879d05..687edd2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
@@ -36,9 +36,6 @@ public class ApplicationLivenessInformation {
   /** number of outstanding requests: those needed to satisfy */
   public int requestsOutstanding;
 
-  /** number of requests submitted to YARN */
-  public int activeRequests;
-
   @Override
   public String toString() {
     final StringBuilder sb =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java
index 713cffd..bc3d526 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/RestTypeMarshalling.java
@@ -19,18 +19,7 @@
 package org.apache.slider.api.types;
 
 import org.apache.slider.api.proto.Messages;
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.api.types.ContainerInformation;
-import org.apache.slider.api.types.NodeEntryInformation;
-import org.apache.slider.api.types.NodeInformation;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.persist.AggregateConfSerDeser;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -265,20 +254,4 @@ public class RestTypeMarshalling {
   public static String unmarshall(Messages.WrappedJsonProto wire) {
     return wire.getJson();
   }
-
-  public static ConfTree unmarshallToConfTree(Messages.WrappedJsonProto wire) throws
-      IOException {
-    return new ConfTreeSerDeser().fromJson(wire.getJson());
-  }
-
-  public static ConfTreeOperations unmarshallToCTO(Messages.WrappedJsonProto wire) throws
-      IOException {
-    return new ConfTreeOperations(new ConfTreeSerDeser().fromJson(wire.getJson()));
-  }
-
-  public static AggregateConf unmarshallToAggregateConf(Messages.WrappedJsonProto wire) throws
-      IOException {
-    return new AggregateConfSerDeser().fromJson(wire.getJson());
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index f4ea70b..8bceddf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -46,8 +46,6 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.KerberosDiags;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.alias.CredentialProvider;
-import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
@@ -59,8 +57,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
-import org.apache.hadoop.yarn.api.records.NodeReport;
-import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.YarnClientApplication;
@@ -72,14 +68,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.slider.api.ClusterNode;
-import org.apache.slider.api.SliderApplicationApi;
 import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.client.ipc.SliderApplicationIpcClient;
 import org.apache.slider.client.ipc.SliderClusterOperations;
 import org.apache.slider.common.Constants;
 import org.apache.slider.common.SliderExitCodes;
@@ -119,9 +113,6 @@ import org.apache.slider.common.tools.ConfigHelper;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.buildutils.InstanceIO;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
 import org.apache.slider.core.exceptions.BadClusterStateException;
 import org.apache.slider.core.exceptions.BadCommandArgumentsException;
 import org.apache.slider.core.exceptions.BadConfigException;
@@ -178,8 +169,6 @@ import java.io.InterruptedIOException;
 import java.io.OutputStreamWriter;
 import java.io.PrintStream;
 import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.io.Writer;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -199,7 +188,6 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import static org.apache.hadoop.registry.client.binding.RegistryUtils.*;
-import static org.apache.slider.api.InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH;
 import static org.apache.slider.common.Constants.HADOOP_JAAS_DEBUG;
 import static org.apache.slider.common.params.SliderActions.*;
 import static org.apache.slider.common.tools.SliderUtils.*;
@@ -253,7 +241,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
    */
   private SliderYarnClientImpl yarnClient;
   private YarnAppListClient yarnAppListClient;
-  private AggregateConf launchedInstanceDefinition;
 
   /**
    * The YARN registry service
@@ -942,43 +929,43 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return 0;
   }
 
-  protected static void checkForCredentials(Configuration conf,
-      ConfTree tree, String clusterName) throws IOException {
-    if (tree.credentials == null || tree.credentials.isEmpty()) {
-      log.info("No credentials requested");
-      return;
-    }
-
-    Console console = System.console();
-    for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
-      String provider = cred.getKey()
-          .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
-          .replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
-      List<String> aliases = cred.getValue();
-      if (aliases == null || aliases.isEmpty()) {
-        continue;
-      }
-      Configuration c = new Configuration(conf);
-      c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
-      CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0);
-      Set<String> existingAliases = new HashSet<>(credentialProvider.getAliases());
-      for (String alias : aliases) {
-        if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
-          log.info("Credentials for " + alias + " found in " + provider);
-        } else {
-          if (console == null) {
-            throw new IOException("Unable to input password for " + alias +
-                " because System.console() is null; provider " + provider +
-                " must be populated manually");
-          }
-          char[] pass = readPassword(alias, console);
-          credentialProvider.createCredentialEntry(alias, pass);
-          credentialProvider.flush();
-          Arrays.fill(pass, ' ');
-        }
-      }
-    }
-  }
+//  protected static void checkForCredentials(Configuration conf,
+//      ConfTree tree, String clusterName) throws IOException {
+//    if (tree.credentials == null || tree.credentials.isEmpty()) {
+//      log.info("No credentials requested");
+//      return;
+//    }
+//
+//    Console console = System.console();
+//    for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
+//      String provider = cred.getKey()
+//          .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
+//          .replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
+//      List<String> aliases = cred.getValue();
+//      if (aliases == null || aliases.isEmpty()) {
+//        continue;
+//      }
+//      Configuration c = new Configuration(conf);
+//      c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
+//      CredentialProvider credentialProvider = CredentialProviderFactory.getProviders(c).get(0);
+//      Set<String> existingAliases = new HashSet<>(credentialProvider.getAliases());
+//      for (String alias : aliases) {
+//        if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
+//          log.info("Credentials for " + alias + " found in " + provider);
+//        } else {
+//          if (console == null) {
+//            throw new IOException("Unable to input password for " + alias +
+//                " because System.console() is null; provider " + provider +
+//                " must be populated manually");
+//          }
+//          char[] pass = readPassword(alias, console);
+//          credentialProvider.createCredentialEntry(alias, pass);
+//          credentialProvider.flush();
+//          Arrays.fill(pass, ' ');
+//        }
+//      }
+//    }
+//  }
 
   private static char[] readPassword(String alias, Console console)
       throws IOException {
@@ -1347,56 +1334,57 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   private int actionPackageInstances() throws YarnException, IOException {
-    Map<String, Path> persistentInstances = sliderFileSystem
-        .listPersistentInstances();
-    if (persistentInstances.isEmpty()) {
-      log.info("No slider cluster specification available");
-      return EXIT_SUCCESS;
-    }
-    String pkgPathValue = sliderFileSystem
-        .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri()
-        .getPath();
-    FileSystem fs = sliderFileSystem.getFileSystem();
-    Iterator<Map.Entry<String, Path>> instanceItr = persistentInstances
-        .entrySet().iterator();
-    log.info("List of applications with its package name and path");
-    println("%-25s  %15s  %30s  %s", "Cluster Name", "Package Name",
-        "Package Version", "Application Location");
-    while(instanceItr.hasNext()) {
-      Map.Entry<String, Path> entry = instanceItr.next();
-      String clusterName = entry.getKey();
-      Path clusterPath = entry.getValue();
-      AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
-          clusterName, clusterPath);
-      Path appDefPath = null;
-      try {
-        appDefPath = new Path(
-            getApplicationDefinitionPath(instanceDefinition
-                .getAppConfOperations()));
-      } catch (BadConfigException e) {
-        // Invalid cluster state, so move on to next. No need to log anything
-        // as this is just listing of instances.
-        continue;
-      }
-      if (!appDefPath.isUriPathAbsolute()) {
-        appDefPath = new Path(fs.getHomeDirectory(), appDefPath);
-      }
-      String appDefPathStr = appDefPath.toUri().toString();
-      try {
-        if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) {
-          String packageName = appDefPath.getParent().getName();
-          String packageVersion = StringUtils.EMPTY;
-          if (instanceDefinition.isVersioned()) {
-            packageVersion = packageName;
-            packageName = appDefPath.getParent().getParent().getName();
-          }
-          println("%-25s  %15s  %30s  %s", clusterName, packageName,
-              packageVersion, appDefPathStr);
-        }
-      } catch (IOException e) {
-        log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr);
-      }
-    }
+//    Map<String, Path> persistentInstances = sliderFileSystem
+//        .listPersistentInstances();
+//    if (persistentInstances.isEmpty()) {
+//      log.info("No slider cluster specification available");
+//      return EXIT_SUCCESS;
+//    }
+//    String pkgPathValue = sliderFileSystem
+//        .buildPackageDirPath(StringUtils.EMPTY, StringUtils.EMPTY).toUri()
+//        .getPath();
+//    FileSystem fs = sliderFileSystem.getFileSystem();
+//    Iterator<Map.Entry<String, Path>> instanceItr = persistentInstances
+//        .entrySet().iterator();
+//    log.info("List of applications with its package name and path");
+//    println("%-25s  %15s  %30s  %s", "Cluster Name", "Package Name",
+//        "Package Version", "Application Location");
+    // TODO: deal with packages
+//    while(instanceItr.hasNext()) {
+//      Map.Entry<String, Path> entry = instanceItr.next();
+//      String clusterName = entry.getKey();
+//      Path clusterPath = entry.getValue();
+//      AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
+//          clusterName, clusterPath);
+//      Path appDefPath = null;
+//      try {
+//        appDefPath = new Path(
+//            getApplicationDefinitionPath(instanceDefinition
+//                .getAppConfOperations()));
+//      } catch (BadConfigException e) {
+//        // Invalid cluster state, so move on to next. No need to log anything
+//        // as this is just listing of instances.
+//        continue;
+//      }
+//      if (!appDefPath.isUriPathAbsolute()) {
+//        appDefPath = new Path(fs.getHomeDirectory(), appDefPath);
+//      }
+//      String appDefPathStr = appDefPath.toUri().toString();
+//      try {
+//        if (appDefPathStr.contains(pkgPathValue) && fs.isFile(appDefPath)) {
+//          String packageName = appDefPath.getParent().getName();
+//          String packageVersion = StringUtils.EMPTY;
+//          if (instanceDefinition.isVersioned()) {
+//            packageVersion = packageName;
+//            packageName = appDefPath.getParent().getParent().getName();
+//          }
+//          println("%-25s  %15s  %30s  %s", clusterName, packageName,
+//              packageVersion, appDefPathStr);
+//        }
+//      } catch (IOException e) {
+//        log.debug("{} application definition path {} is not found.", clusterName, appDefPathStr);
+//      }
+//    }
     return EXIT_SUCCESS;
   }
 
@@ -1565,29 +1553,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
     return newTimeout;
   }
 
-  /**
-   * Load the instance definition. It is not resolved at this point
-   * @param name cluster name
-   * @param clusterDirectory cluster dir
-   * @return the loaded configuration
-   * @throws IOException
-   * @throws SliderException
-   * @throws UnknownApplicationInstanceException if the file is not found
-   */
-  public AggregateConf loadInstanceDefinitionUnresolved(String name,
-            Path clusterDirectory) throws IOException, SliderException {
-
-    try {
-      AggregateConf definition =
-        InstanceIO.loadInstanceDefinitionUnresolved(sliderFileSystem,
-                                                    clusterDirectory);
-      definition.setName(name);
-      return definition;
-    } catch (FileNotFoundException e) {
-      throw UnknownApplicationInstanceException.unknownInstance(name, e);
-    }
-  }
-
   protected Map<String, String> getAmLaunchEnv(Configuration config) {
     String sliderAmLaunchEnv = config.get(KEY_AM_LAUNCH_ENV);
     log.debug("{} = {}", KEY_AM_LAUNCH_ENV, sliderAmLaunchEnv);
@@ -2237,55 +2202,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   /**
-   * List all node UUIDs in a role
-   * @param role role name or "" for all
-   * @return an array of UUID strings
-   * @throws IOException
-   * @throws YarnException
-   */
-  @VisibleForTesting
-  public String[] listNodeUUIDsByRole(String role) throws
-                                               IOException,
-                                               YarnException {
-    return createClusterOperations()
-              .listNodeUUIDsByRole(role);
-  }
-
-  /**
-   * List all nodes in a role. This is a double round trip: once to list
-   * the nodes in a role, another to get their details
-   * @param role component/role to look for
-   * @return an array of ContainerNode instances
-   * @throws IOException
-   * @throws YarnException
-   */
-  @VisibleForTesting
-  public List<ClusterNode> listClusterNodesInRole(String role) throws
-                                               IOException,
-                                               YarnException {
-    return createClusterOperations().listClusterNodesInRole(role);
-  }
-
-  /**
-   * Get the details on a list of uuids
-   * @param uuids uuids to ask for 
-   * @return a possibly empty list of node details
-   * @throws IOException
-   * @throws YarnException
-   */
-  @VisibleForTesting
-  public List<ClusterNode> listClusterNodes(String[] uuids) throws
-                                               IOException,
-                                               YarnException {
-
-    if (uuids.length == 0) {
-      // short cut on an empty list
-      return new LinkedList<>();
-    }
-    return createClusterOperations().listClusterNodes(uuids);
-  }
-
-  /**
    * Bond to a running cluster
    * @param clustername cluster name
    * @return the AM RPC client
@@ -2320,39 +2236,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   /**
-   * Create a cluster operations instance against the active cluster
-   * -returning any previous created one if held.
-   * @return a bonded cluster operations instance
-   * @throws YarnException YARN issues
-   * @throws IOException IO problems
-   */
-  private SliderClusterOperations createClusterOperations() throws
-                                                         YarnException,
-                                                         IOException {
-    if (sliderClusterOperations == null) {
-      sliderClusterOperations =
-        createClusterOperations(getDeployedClusterName());
-    }
-    return sliderClusterOperations;
-  }
-
-  /**
-   * Wait for an instance of a named role to be live (or past it in the lifecycle)
-   * @param role role to look for
-   * @param timeout time to wait
-   * @return the state. If still in CREATED, the cluster didn't come up
-   * in the time period. If LIVE, all is well. If >LIVE, it has shut for a reason
-   * @throws IOException IO
-   * @throws SliderException Slider
-   * @throws WaitTimeoutException if the wait timed out
-   */
-  @VisibleForTesting
-  public int waitForRoleInstanceLive(String role, long timeout)
-    throws WaitTimeoutException, IOException, YarnException {
-    return createClusterOperations().waitForRoleInstanceLive(role, timeout);
-  }
-
-  /**
    * Generate an exception for an unknown cluster
    * @param clustername cluster name
    * @return an exception with text and a relevant exit code
@@ -2546,11 +2429,18 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       if (diagnosticArgs.client) {
         actionDiagnosticClient(diagnosticArgs);
       } else if (diagnosticArgs.application) {
-        actionDiagnosticApplication(diagnosticArgs);
+        // TODO: print the application configs - fetch them from the AM
       } else if (diagnosticArgs.yarn) {
-        actionDiagnosticYarn(diagnosticArgs);
+        // This method printed YARN node info and YARN configs. The `yarn node`
+        // CLI is a much richer alternative for node info; for configs, this
+        // method only read the local client configuration, not the cluster
+        // configuration.
+//        actionDiagnosticYarn(diagnosticArgs);
       } else if (diagnosticArgs.credentials) {
-        actionDiagnosticCredentials();
+        // actionDiagnosticCredentials internally only ran a bare 'klist' command;
+        // users can run klist themselves with whatever extra options they need,
+        // so this method adds little value.
+//        actionDiagnosticCredentials();
       } else if (diagnosticArgs.all) {
         actionDiagnosticAll(diagnosticArgs);
       } else if (diagnosticArgs.level) {
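
For reference, the removed actionDiagnosticYarn did little more than fetch node reports through YarnClient, which the `yarn node -list` CLI already covers. A minimal sketch of the equivalent direct call, assuming a plain client-side Configuration; the class and method names here are illustrative, not part of this patch.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class NodeReportSketch {

  /** Print one report per RUNNING NodeManager: host, rack, capacity, usage. */
  public static void printRunningNodes() throws IOException, YarnException {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration());
    yarnClient.start();
    try {
      List<NodeReport> reports = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : reports) {
        System.out.println(report);
      }
    } finally {
      yarnClient.stop();
    }
  }
}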
@@ -2571,122 +2461,11 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
       throws IOException, YarnException {
     // assign application name from param to each sub diagnostic function
     actionDiagnosticClient(diagnosticArgs);
-    actionDiagnosticApplication(diagnosticArgs);
-    actionDiagnosticSlider(diagnosticArgs);
-    actionDiagnosticYarn(diagnosticArgs);
-    actionDiagnosticCredentials();
-  }
-
-  private void actionDiagnosticCredentials() throws BadConfigException,
-      IOException {
-    if (isHadoopClusterSecure(loadSliderClientXML())) {
-      String credentialCacheFileDescription = null;
-      try {
-        credentialCacheFileDescription = checkCredentialCacheFile();
-      } catch (BadConfigException e) {
-        log.error("The credential config is not valid: " + e.toString());
-        throw e;
-      } catch (IOException e) {
-        log.error("Unable to read the credential file: " + e.toString());
-        throw e;
-      }
-      log.info("Credential cache file for the current user: "
-          + credentialCacheFileDescription);
-    } else {
-      log.info("the cluster is not in secure mode");
-    }
-  }
-
-  private void actionDiagnosticYarn(ActionDiagnosticArgs diagnosticArgs)
-      throws IOException, YarnException {
-    JSONObject converter = null;
-    log.info("the node in the YARN cluster has below state: ");
-    List<NodeReport> yarnClusterInfo;
-    try {
-      yarnClusterInfo = yarnClient.getNodeReports(NodeState.RUNNING);
-    } catch (YarnException e1) {
-      log.error("Exception happened when fetching node report from the YARN cluster: "
-          + e1.toString());
-      throw e1;
-    } catch (IOException e1) {
-      log.error("Network problem happened when fetching node report YARN cluster: "
-          + e1.toString());
-      throw e1;
-    }
-    for (NodeReport nodeReport : yarnClusterInfo) {
-      log.info(nodeReport.toString());
-    }
-
-    if (diagnosticArgs.verbose) {
-      Writer configWriter = new StringWriter();
-      try {
-        Configuration.dumpConfiguration(yarnClient.getConfig(), configWriter);
-      } catch (IOException e1) {
-        log.error("Network problem happened when retrieving YARN config from YARN: "
-            + e1.toString());
-        throw e1;
-      }
-      try {
-        converter = new JSONObject(configWriter.toString());
-        log.info("the configuration of the YARN cluster is: "
-            + converter.toString(2));
-
-      } catch (JSONException e) {
-        log.error("JSONException happened during parsing response from YARN: "
-            + e.toString());
-      }
-    }
-  }
-
-  private void actionDiagnosticSlider(ActionDiagnosticArgs diagnosticArgs)
-      throws YarnException, IOException {
-    // not using member variable clustername because we want to place
-    // application name after --application option and member variable
-    // cluster name has to be put behind action
-    String clusterName = diagnosticArgs.name;
-    if(isUnset(clusterName)){
-      throw new BadCommandArgumentsException("application name must be provided with --name option");
-    }
-    AggregateConf instanceDefinition = new AggregateConf();
-    String imagePath = instanceDefinition.getInternalOperations().get(
-        INTERNAL_APPLICATION_IMAGE_PATH);
-    // if null, it will be uploaded by Slider and thus at slider's path
-    if (imagePath == null) {
-      ApplicationReport appReport = findInstance(clusterName);
-      if (appReport != null) {
-        Path path1 = sliderFileSystem.getTempPathForCluster(clusterName);
-        Path subPath = new Path(path1, appReport.getApplicationId().toString()
-            + "/agent");
-        imagePath = subPath.toString();
-      }
-    }
-    log.info("The path of slider agent tarball on HDFS is: " + imagePath);
-  }
-
-  private void actionDiagnosticApplication(ActionDiagnosticArgs diagnosticArgs)
-      throws YarnException, IOException {
-    // not using member variable clustername because we want to place
-    // application name after --application option and member variable
-    // cluster name has to be put behind action
-    String clusterName = diagnosticArgs.name;
-    requireArgumentSet(Arguments.ARG_NAME, clusterName);
-    AggregateConf instanceDefinition = new AggregateConf();
-    String clusterDir = instanceDefinition.getAppConfOperations()
-        .getGlobalOptions().get(AgentKeys.APP_ROOT);
-    String pkgTarball = getApplicationDefinitionPath(instanceDefinition.getAppConfOperations());
-    String runAsUser = instanceDefinition.getAppConfOperations()
-        .getGlobalOptions().get(AgentKeys.RUNAS_USER);
-
-    log.info("The location of the cluster instance directory in HDFS is: {}", clusterDir);
-    log.info("The name of the application package tarball on HDFS is: {}",pkgTarball);
-    log.info("The runas user of the application in the cluster is: {}",runAsUser);
-
-    if (diagnosticArgs.verbose) {
-      log.info("App config of the application:\n{}",
-          instanceDefinition.getAppConf().toJson());
-      log.info("Resource config of the application:\n{}",
-          instanceDefinition.getResources().toJson());
-    }
+    // actionDiagnosticSlider only printed the slider agent tarball location on
+    // HDFS, which is no longer valid.
+    // actionDiagnosticCredentials only ran a bare 'klist' command; users can
+    // run klist themselves with whatever extra options they need, so this
+    // method adds little value.
   }
 
   private void actionDiagnosticClient(ActionDiagnosticArgs diagnosticArgs)
@@ -3242,16 +3021,6 @@ public class SliderClient extends AbstractSliderLaunchedService implements RunSe
   }
 
   /**
-   * Create a new IPC client for talking to slider via what follows the REST API.
-   * Client must already be bonded to the cluster
-   * @return a new IPC client
-   */
-  public SliderApplicationApi createIpcClient()
-    throws IOException, YarnException {
-    return new SliderApplicationIpcClient(createClusterOperations());
-  }
-
-  /**
    * Save/list tokens. This is for testing oozie integration
    * @param args commands
    * @return status

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
deleted file mode 100644
index 3b5147f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderApplicationIpcClient.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.client.ipc;
-
-import com.google.common.base.Preconditions;
-import org.apache.slider.api.SliderClusterProtocol;
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.api.types.ContainerInformation;
-import org.apache.slider.api.types.NodeInformation;
-import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.api.types.PingInformation;
-import org.apache.slider.api.SliderApplicationApi;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.exceptions.NoSuchNodeException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Map;
-
-/**
- * Implementation of the Slider RESTy Application API over IPC.
- * <p>
- * Operations are executed via the {@link SliderClusterOperations}
- * instance passed in; raised exceptions may be converted into ones
- * consistent with the REST API.
- */
-public class SliderApplicationIpcClient implements SliderApplicationApi {
-
-  private static final Logger log =
-      LoggerFactory.getLogger(SliderApplicationIpcClient.class);
-
-  private final SliderClusterOperations operations;
-
-  public SliderApplicationIpcClient(SliderClusterOperations operations) {
-    Preconditions.checkArgument(operations != null, "null operations");
-    this.operations = operations;
-  }
-
-  /**
-   * Convert received (And potentially unmarshalled) local/remote
-   * exceptions into the equivalents in the REST API.
-   * Best effort. 
-   * <p>
-   * If there is no translation, the original exception is returned.
-   * <p>
-   * If a new exception was created, it will have the message of the 
-   * string value of the original exception, and that original
-   * exception will be the nested cause of this one
-   * @param exception IOException to convert
-   * @return an exception to throw
-   */
-  private IOException convert(IOException exception) {
-    IOException result = exception;
-    if (exception instanceof NoSuchNodeException) {
-      result = new FileNotFoundException(exception.toString());
-      result.initCause(exception);
-    } else {
-      // TODO: remap any other exceptions
-    }
-    return result;
-  }
-  
-  public SliderApplicationIpcClient(SliderClusterProtocol proxy) {
-    this(new SliderClusterOperations(proxy));
-  }
-
-  @Override
-  public AggregateConf getDesiredModel() throws IOException {
-    try {
-      return operations.getModelDesired();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ConfTreeOperations getDesiredAppconf() throws IOException {
-    try {
-      return operations.getModelDesiredAppconf();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ConfTreeOperations getDesiredResources() throws IOException {
-    try {
-      return operations.getModelDesiredResources();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public AggregateConf getResolvedModel() throws IOException {
-    try {
-      return operations.getModelResolved();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ConfTreeOperations getResolvedAppconf() throws IOException {
-    try {
-      return operations.getModelResolvedAppconf();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ConfTreeOperations getResolvedResources() throws IOException {
-    try {
-      return operations.getModelResolvedResources();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ConfTreeOperations getLiveResources() throws IOException {
-    try {
-      return operations.getLiveResources();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Map<String, ContainerInformation> enumContainers() throws IOException {
-    try {
-      return operations.enumContainers();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ContainerInformation getContainer(String containerId) throws
-      IOException {
-    try {
-      return operations.getContainer(containerId);
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Map<String, ComponentInformation> enumComponents() throws IOException {
-    try {
-      return operations.enumComponents();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ComponentInformation getComponent(String componentName) throws IOException {
-    try {
-      return operations.getComponent(componentName);
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public NodeInformationList getLiveNodes() throws IOException {
-    try {
-      return operations.getLiveNodes();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public NodeInformation getLiveNode(String hostname) throws IOException {
-    try {
-      return operations.getLiveNode(hostname);
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public PingInformation ping(String text) throws IOException {
-    return null;
-  }
-
-  @Override
-  public void stop(String text) throws IOException {
-    try {
-      operations.stop(text);
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public ApplicationLivenessInformation getApplicationLiveness() throws
-      IOException {
-    try {
-      return operations.getApplicationLiveness();
-    } catch (IOException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public String toString() {
-    return "IPC implementation of SliderApplicationApi bonded to " + operations;
-  }
-}
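
For reference, the deleted SliderApplicationIpcClient mostly delegated to SliderClusterOperations and remapped internal exceptions (for example NoSuchNodeException to FileNotFoundException) so that callers saw REST-style failures. A minimal sketch of that translation idiom follows; the helper names and the nested NoSuchNodeException stand-in are hypothetical, not the Slider classes themselves.

import java.io.FileNotFoundException;
import java.io.IOException;

public final class ExceptionTranslation {

  private ExceptionTranslation() {
  }

  /** Functional wrapper for a call that may throw IOException. */
  public interface IOCall<T> {
    T call() throws IOException;
  }

  /** Hypothetical stand-in for Slider's NoSuchNodeException. */
  public static class NoSuchNodeException extends IOException {
    public NoSuchNodeException(String msg) {
      super(msg);
    }
  }

  /** Run the call, remapping "no such node" to the REST-equivalent exception. */
  public static <T> T translating(IOCall<T> op) throws IOException {
    try {
      return op.call();
    } catch (NoSuchNodeException e) {
      // Keep the original exception as the cause of the remapped one
      FileNotFoundException fnfe = new FileNotFoundException(e.toString());
      fnfe.initCause(e);
      throw fnfe;
    }
  }
}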

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
index 623b8b0..3bb2af6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/ipc/SliderClusterOperations.java
@@ -20,27 +20,20 @@ package org.apache.slider.client.ipc;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.SliderClusterProtocol;
 import org.apache.slider.api.StateValues;
 import org.apache.slider.api.proto.Messages;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.api.resource.Component;
-import org.apache.slider.api.types.ApplicationLivenessInformation;
-import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.api.types.NodeInformation;
 import org.apache.slider.api.types.NodeInformationList;
 import org.apache.slider.api.types.PingInformation;
 import org.apache.slider.common.tools.Duration;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.conf.ConfTreeOperations;
 import org.apache.slider.core.exceptions.NoSuchNodeException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.WaitTimeoutException;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
 import org.apache.slider.core.persist.JsonSerDeser;
 import org.codehaus.jackson.JsonParseException;
 import org.slf4j.Logger;
@@ -50,11 +43,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
-import static org.apache.slider.api.types.RestTypeMarshalling.*;
+import static org.apache.slider.api.types.RestTypeMarshalling.unmarshall;
 
 /**
  * Cluster operations at a slightly higher level than the RPC code
@@ -322,85 +313,6 @@ public class SliderClusterOperations {
     appMaster.amSuicide(req);
   }
 
-  /**
-   * Get the application liveness
-   * @return current liveness information
-   * @throws IOException
-   */
-  public ApplicationLivenessInformation getLivenessInformation() throws IOException {
-    Messages.GetApplicationLivenessRequestProto.Builder builder =
-        Messages.GetApplicationLivenessRequestProto.newBuilder();
-    Messages.ApplicationLivenessInformationProto wire =
-        appMaster.getLivenessInformation(builder.build());
-    return unmarshall(wire);
-
-  }
-
-  public AggregateConf getModelDesired() throws IOException {
-    return unmarshallToAggregateConf(appMaster.getModelDesired(EMPTY));
-  }
-
-  
-  public ConfTreeOperations getModelDesiredAppconf() throws IOException {
-    return unmarshallToCTO(appMaster.getModelDesiredAppconf(EMPTY));
-  }
-
-  
-  public ConfTreeOperations getModelDesiredResources() throws IOException {
-    return unmarshallToCTO(appMaster.getModelDesiredResources(EMPTY));
-  }
-
-  
-  public AggregateConf getModelResolved() throws IOException {
-    return unmarshallToAggregateConf(appMaster.getModelResolved(EMPTY));
-  }
-
-  
-  public ConfTreeOperations getModelResolvedAppconf() throws IOException {
-    return unmarshallToCTO(appMaster.getModelResolvedAppconf(EMPTY));
-  }
-
-  
-  public ConfTreeOperations getModelResolvedResources() throws IOException {
-    return unmarshallToCTO(appMaster.getModelDesiredResources(EMPTY));
-  }
-
-  
-  public ConfTreeOperations getLiveResources() throws IOException {
-    return unmarshallToCTO(appMaster.getLiveResources(EMPTY));
-  }
-
-  
-  public Map<String, ContainerInformation> enumContainers() throws IOException {
-    Messages.GetLiveContainersResponseProto response =
-        appMaster.getLiveContainers(
-            Messages.GetLiveContainersRequestProto.newBuilder().build());
-
-    int namesCount = response.getNamesCount();
-    int records = response.getContainersCount();
-    if (namesCount != records) {
-      throw new IOException("Number of names returned (" + namesCount
-                      + ") does not match the number of records returned: " 
-                      + records);
-    }
-    Map<String, ContainerInformation> map = new HashMap<>(namesCount);
-    for (int i = 0; i < namesCount; i++) {
-      map.put(response.getNames(i), unmarshall(response.getContainers(i)));
-    }
-    return map;
-  }
-
-  
-  public ContainerInformation getContainer(String containerId) throws
-      IOException {
-    Messages.ContainerInformationProto response =
-        appMaster.getLiveContainer(
-            Messages.GetLiveContainerRequestProto.newBuilder()
-                                                 .setContainerId(containerId)
-                                                 .build());
-    return unmarshall(response);
-  }
-
   public List<ContainerInformation> getContainers() throws IOException {
     Messages.GetLiveContainersResponseProto response = appMaster
         .getLiveContainers(Messages.GetLiveContainersRequestProto.newBuilder()
@@ -408,34 +320,6 @@ public class SliderClusterOperations {
     return unmarshall(response);
   }
 
-  public Map<String, ComponentInformation> enumComponents() throws IOException {
-    Messages.GetLiveComponentsResponseProto response =
-        appMaster.getLiveComponents(
-            Messages.GetLiveComponentsRequestProto.newBuilder().build());
-
-    int namesCount = response.getNamesCount();
-    int records = response.getComponentsCount();
-    if (namesCount != records) {
-      throw new IOException(
-          "Number of names returned (" + namesCount + ")" +
-          " does not match the number of records returned: " + records);
-    }
-    Map<String, ComponentInformation> map = new HashMap<>(namesCount);
-    for (int i = 0; i < namesCount; i++) {
-      map.put(response.getNames(i), unmarshall(response.getComponents(i)));
-    }
-    return map;
-  }
-
-  public ComponentInformation getComponent(String componentName)
-      throws IOException {
-    Messages.GetLiveComponentRequestProto.Builder builder =
-        Messages.GetLiveComponentRequestProto.newBuilder();
-    builder.setName(componentName);
-    Messages.ComponentInformationProto proto = appMaster.getLiveComponent(builder.build());
-    return unmarshall(proto);
-  }
-
   public NodeInformationList getLiveNodes() throws IOException {
     Messages.GetLiveNodesResponseProto response =
       appMaster.getLiveNodes(Messages.GetLiveNodesRequestProto.newBuilder().build());
@@ -462,13 +346,4 @@ public class SliderClusterOperations {
   public void stop(String text) throws IOException {
     amSuicide(text, 3, 0);
   }
-
-  public ApplicationLivenessInformation getApplicationLiveness() throws
-      IOException {
-    Messages.ApplicationLivenessInformationProto proto =
-        appMaster.getLivenessInformation(
-            Messages.GetApplicationLivenessRequestProto.newBuilder().build()
-        );
-    return unmarshall(proto);
-  }
 }
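
For reference, the removed enumContainers and enumComponents methods both validated that the proto response's name count matched its record count before zipping the two parallel lists into a map. A generic sketch of that check, with hypothetical helper names; it is not part of this patch.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class ParallelListZip {

  private ParallelListZip() {
  }

  /** Zip parallel name/record lists into a map, failing if the sizes differ. */
  public static <V> Map<String, V> zip(List<String> names, List<V> records)
      throws IOException {
    if (names.size() != records.size()) {
      throw new IOException("Number of names returned (" + names.size()
          + ") does not match the number of records returned: "
          + records.size());
    }
    Map<String, V> map = new HashMap<>(names.size());
    for (int i = 0; i < names.size(); i++) {
      map.put(names.get(i), records.get(i));
    }
    return map;
  }
}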

