Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2017/06/21 18:33:53 UTC

[31/50] [abbrv] hadoop git commit: YARN-6400. Remove some unneeded code after YARN-6255. Contributed by Jian He

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java
deleted file mode 100644
index aad2757..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/conf/TemplateInputPropertiesValidator.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.slider.core.conf;
-
-import org.apache.slider.core.exceptions.BadConfigException;
-
-/**
- *
- */
-public class TemplateInputPropertiesValidator
-    extends AbstractInputPropertiesValidator {
-
-  void validatePropertyNamePrefix(String key) throws BadConfigException {
-    if (key.startsWith("yarn.")) {
-      throw new BadConfigException(
-          "argument %s has 'yarn.' prefix - this is not allowed in templates", key);
-    }
-  }
-
-  @Override
-  void validateGlobalProperties(ConfTreeOperations props) {
-    // do nothing
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
index aefc0de..4182459 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
@@ -304,30 +304,6 @@ public abstract class AbstractLauncher extends Configured {
     }
   }
 
-  /**
-   * Extract the value for option
-   * {@code yarn.resourcemanager.am.retry-count-window-ms}
-   * and set it on the ApplicationSubmissionContext. Use the default value
-   * if option is not set.
-   *
-   * @param submissionContext
-   * @param map
-   */
-  public void extractAmRetryCount(ApplicationSubmissionContext submissionContext,
-                                  Map<String, String> map) {
-
-    if (map != null) {
-      MapOperations options = new MapOperations("", map);
-      long amRetryCountWindow = options.getOptionLong(ResourceKeys
-          .YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS,
-          ResourceKeys.DEFAULT_AM_RETRY_COUNT_WINDOW_MS);
-      log.info("Setting {} to {}",
-          ResourceKeys.YARN_RESOURCEMANAGER_AM_RETRY_COUNT_WINDOW_MS,
-          amRetryCountWindow);
-      submissionContext.setAttemptFailuresValidityInterval(amRetryCountWindow);
-    }
-  }
-
   public void extractLogAggregationContext(Map<String, String> map) {
     if (map != null) {
       String logPatternSepStr = "\\|";
@@ -453,24 +429,6 @@ public abstract class AbstractLauncher extends Configured {
     env.putAll(map);
   }
 
-  /**
-   * Important: the configuration must already be fully resolved 
-   * in order to pick up global options
-   * Copy env vars into the launch context.
-   */
-  public boolean copyEnvVars(MapOperations options) {
-    if (options == null) {
-      return false;
-    }
-    for (Map.Entry<String, String> entry : options.entrySet()) {
-      String key = entry.getKey();
-      if (key.startsWith(RoleKeys.ENV_PREFIX)) {
-        key = key.substring(RoleKeys.ENV_PREFIX.length());
-        env.put(key, entry.getValue());
-      }
-    }
-    return true;
-  }
 
   public String[] dumpEnvToString() {
 
@@ -504,19 +462,6 @@ public abstract class AbstractLauncher extends Configured {
     addLocalResources(confResources);
   }
 
-  /**
-   * Return the label expression and if not set null
-   * @param map map to look up
-   * @return extracted label or null
-   */
-  public String extractLabelExpression(Map<String, String> map) {
-    if (map != null) {
-      MapOperations options = new MapOperations("", map);
-      return options.getOption(ResourceKeys.YARN_LABEL_EXPRESSION, null);
-    }
-    return null;
-  }
-
   public void setDockerImage(String dockerImage) {
     this.dockerImage = dockerImage;
   }
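
The three deleted helpers above were thin wrappers over map lookups: read
yarn.resourcemanager.am.retry-count-window-ms and set it as the attempt-failures
validity interval, copy env.-prefixed options into the launch environment, and
return the YARN label expression. As a minimal sketch only, here is how a caller
could apply the first and last of those options directly to an
ApplicationSubmissionContext. The class name and the "yarn.label.expression"
literal are assumptions (the ResourceKeys constants are not shown in this hunk),
and this is not the replacement code introduced by YARN-6255.

    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;

    public final class SubmissionContextOptions {

      // Key confirmed by the deleted javadoc; the label key is an assumed literal.
      static final String AM_RETRY_WINDOW_MS =
          "yarn.resourcemanager.am.retry-count-window-ms";
      static final String YARN_LABEL_EXPRESSION = "yarn.label.expression";

      private SubmissionContextOptions() {
      }

      /** Apply retry window and node label, if present, to the context. */
      public static void apply(ApplicationSubmissionContext context,
          Map<String, String> options) {
        if (options == null) {
          return;
        }
        String window = options.get(AM_RETRY_WINDOW_MS);
        if (window != null) {
          // same effect as the deleted extractAmRetryCount()
          context.setAttemptFailuresValidityInterval(Long.parseLong(window));
        }
        String label = options.get(YARN_LABEL_EXPRESSION);
        if (label != null) {
          // the deleted extractLabelExpression() only returned this value
          context.setNodeLabelExpression(label);
        }
      }
    }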

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java
deleted file mode 100644
index 90537b6..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/AggregateConfSerDeser.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-
-import java.io.IOException;
-
-/**
- * Conf tree to JSON binding
- */
-public class AggregateConfSerDeser extends JsonSerDeser<AggregateConf> {
-  public AggregateConfSerDeser() {
-    super(AggregateConf.class);
-  }
-
-
-  private static final AggregateConfSerDeser
-      staticinstance = new AggregateConfSerDeser();
-
-  /**
-   * Convert a tree instance to a JSON string -sync access to a shared ser/deser
-   * object instance
-   * @param instance object to convert
-   * @return a JSON string description
-   * @throws JsonParseException parse problems
-   * @throws JsonMappingException O/J mapping problems
-   */
-  public static String toString(AggregateConf instance) throws IOException,
-                                                          JsonGenerationException,
-                                                          JsonMappingException {
-    synchronized (staticinstance) {
-      return staticinstance.toJson(instance);
-    }
-  }
-}
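
The deleted class is one of two near-identical JSON bindings removed in this
commit (see ConfTreeSerDeser below): a JsonSerDeser subclass plus a shared
static instance whose toString() is synchronized. A rough sketch of that
pattern, using the same Jackson 1.x packages the class imported; the class and
method names here are illustrative, not the Slider originals.

    import java.io.IOException;

    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.map.SerializationConfig;

    public final class JsonToString {

      // one shared mapper, guarded the same way the deleted classes
      // guarded their static ser/deser instance
      private static final ObjectMapper MAPPER = new ObjectMapper();

      static {
        MAPPER.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
      }

      private JsonToString() {
      }

      /** Render a bean (e.g. an AggregateConf or ConfTree) as JSON. */
      public static String toJson(Object instance) throws IOException {
        synchronized (MAPPER) {
          return MAPPER.writeValueAsString(instance);
        }
      }
    }

ObjectMapper is generally thread-safe once configured, so the explicit
synchronization is defensive rather than strictly required.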

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java
deleted file mode 100644
index 9759205..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfPersister.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.slider.common.tools.CoreFileSystem;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.exceptions.SliderException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Date;
-
-/**
- * Class to implement persistence of a configuration.
- *
- * This code contains the logic to acquire and release locks.
- * # writelock MUST be acquired exclusively for writes. This is done
- * by creating the file with no overwrite
- * # shared readlock MUST be acquired for reads. This is done by creating the readlock
- * file with overwrite forbidden -but treating a failure as a sign that
- * the lock exists, and therefore the operation can continue.
- * # releaselock is only released if the client created it.
- * # after acquiring either lock, client must check for the alternate lock
- * existing. If it is, release lock and fail.
- * 
- * There's one small race here: multiple readers; first reader releases lock
- * while second is in use. 
- * 
- * Strict Fix: client checks for readlock after read completed.
- * If it is not there, problem: fail. But this massively increases the risk of
- * false negatives.
- * 
- * This isn't 100% perfect, because of the condition where the owner releases
- * a lock, a writer grabs its lock & writes to it, the reader gets slightly
- * contaminated data:
- * own-share-delete-write-own-release(shared)-delete
- * 
- * We are assuming that the rate of change is low enough that this is rare, and
- * of limited damage.
- * 
- * ONCE A CLUSTER IS RUNNING, ONLY THE AM MAY PERSIST UPDATES VIA ITS APIs
- * 
- * That is: outside the AM, a writelock MUST only be acquired after verifying there is no
- * running application.
- */
-public class ConfPersister {
-  private static final Logger log =
-    LoggerFactory.getLogger(ConfPersister.class);
-
-
-  private final ConfTreeSerDeser confTreeSerDeser = new ConfTreeSerDeser();
-
-  private final CoreFileSystem coreFS;
-  private final FileSystem fileSystem;
-  private final Path persistDir;
-  private final Path internal, resources, app_conf;
-  private final Path writelock, readlock;
-
-  public ConfPersister(CoreFileSystem coreFS, Path persistDir) {
-    this.coreFS = coreFS;
-    this.persistDir = persistDir;
-    internal = new Path(persistDir, Filenames.INTERNAL);
-    resources = new Path(persistDir, Filenames.RESOURCES);
-    app_conf = new Path(persistDir, Filenames.APPCONF);
-    writelock = new Path(persistDir, Filenames.WRITELOCK);
-    readlock = new Path(persistDir, Filenames.READLOCK);
-    fileSystem = coreFS.getFileSystem();
-  }
-
-  /**
-   * Get the target directory
-   * @return the directory for persistence
-   */
-  public Path getPersistDir() {
-    return persistDir;
-  }
-
-  /**
-   * Make the persistent directory
-   * @throws IOException IO failure
-   */
-  public void mkPersistDir() throws IOException {
-    coreFS.getFileSystem().mkdirs(persistDir);
-  }
-  
-  @Override
-  public String toString() {
-    return "Persister to " + persistDir;
-  }
-
-  /**
-   * Acquire the writelock
-   * @throws IOException IO
-   * @throws LockAcquireFailedException
-   */
-  @VisibleForTesting
-  void acquireWritelock() throws IOException,
-                                 LockAcquireFailedException {
-    mkPersistDir();
-    long now = System.currentTimeMillis();
-    try {
-      coreFS.cat(writelock, false, new Date(now).toGMTString());
-    } catch (FileAlreadyExistsException e) {
-      // filesystems should raise this (HDFS does)
-      throw new LockAcquireFailedException(writelock);
-    } catch (IOException e) {
-      // some filesystems throw a generic IOE
-      throw new LockAcquireFailedException(writelock, e);
-    }
-    //here the lock is acquired, but verify there is no readlock
-    boolean lockFailure;
-    try {
-      lockFailure = readLockExists();
-    } catch (IOException e) {
-      lockFailure = true;
-    }
-    if (lockFailure) {
-      releaseWritelock();
-      throw new LockAcquireFailedException(readlock);
-    }
-  }
-
-  @VisibleForTesting
-  boolean readLockExists() throws IOException {
-    return fileSystem.exists(readlock);
-  }
-
-  /**
-   * Release the writelock if it is present.
-   * IOExceptions are logged
-   */
-  @VisibleForTesting
-  boolean releaseWritelock() {
-    try {
-      return fileSystem.delete(writelock, false);
-    } catch (IOException e) {
-      log.warn("IOException releasing writelock {} ", writelock, e);
-    }
-    return false;
-  }
-  
-  /**
-   * Acquire the writelock
-   * @throws IOException IO
-   * @throws LockAcquireFailedException
-   * @throws FileNotFoundException if the target dir does not exist.
-   */
-  @VisibleForTesting
-  boolean acquireReadLock() throws FileNotFoundException,
-                                  IOException,
-                                  LockAcquireFailedException {
-    if (!coreFS.getFileSystem().exists(persistDir)) {
-      // the dir is not there, so the data is not there, so there
-      // is nothing to read
-      throw new FileNotFoundException(persistDir.toString());
-    }
-    long now = System.currentTimeMillis();
-    boolean owner;
-    try {
-      coreFS.cat(readlock, false, new Date(now).toGMTString());
-      owner = true;
-    } catch (IOException e) {
-      owner = false;
-    }
-    //here the lock is acquired, but verify there is no readlock
-    boolean lockFailure;
-    try {
-      lockFailure = writelockExists();
-    } catch (IOException e) {
-      lockFailure = true;
-    }
-    if (lockFailure) {
-      releaseReadlock(owner);
-      throw new LockAcquireFailedException(writelock);
-    }
-    return owner;
-  }
-
-  @VisibleForTesting
-  boolean writelockExists() throws IOException {
-    return fileSystem.exists(writelock);
-  }
-
-  /**
-   * Release the writelock if it is present.
-   * IOExceptions are downgraded to failures
-   * @return true if the lock was present and then released  
-   */
-  @VisibleForTesting
-  boolean releaseReadlock(boolean owner) {
-    if (owner) {
-      try {
-        return fileSystem.delete(readlock, false);
-      } catch (IOException e) {
-        log.warn("IOException releasing writelock {} ", readlock, e);
-      }
-    }
-    return false;
-  }
-
-  private void saveConf(AggregateConf conf) throws IOException {
-    confTreeSerDeser.save(fileSystem, internal, conf.getInternal(), true);
-    confTreeSerDeser.save(fileSystem, resources, conf.getResources(), true);
-    confTreeSerDeser.save(fileSystem, app_conf, conf.getAppConf(), true);
-  }
-
-  private void loadConf(AggregateConf conf) throws IOException {
-    conf.setInternal(confTreeSerDeser.load(fileSystem, internal));
-    conf.setResources(confTreeSerDeser.load(fileSystem, resources));
-    conf.setAppConf(confTreeSerDeser.load(fileSystem, app_conf));
-  }
-
-
-  private void maybeExecLockHeldAction(LockHeldAction action) throws
-      IOException,
-      SliderException {
-    if (action != null) {
-      action.execute();
-    }
-  }
-  
-  /**
-   * Save the configuration
-   * @param conf configuration to fill in
-   * @param action
-   * @throws IOException IO problems
-   * @throws LockAcquireFailedException the lock could not be acquired
-   */
-  public void save(AggregateConf conf, LockHeldAction action) throws
-      IOException,
-      SliderException,
-      LockAcquireFailedException {
-    acquireWritelock();
-    try {
-      saveConf(conf);
-      maybeExecLockHeldAction(action);
-    } finally {
-      releaseWritelock();
-    }
-  }
-
-  /**
-   * Load the configuration. If a lock failure is raised, the 
-   * contents of the configuration MAY have changed -lock race conditions
-   * are looked for on exit
-   * @param conf configuration to fill in
-   * @throws IOException IO problems
-   * @throws LockAcquireFailedException the lock could not be acquired
-   */
-  public void load(AggregateConf conf) throws
-      FileNotFoundException,
-      IOException,
-      SliderException,
-      LockAcquireFailedException {
-    boolean owner = acquireReadLock();
-    try {
-      loadConf(conf);
-    } finally {
-      releaseReadlock(owner);
-    }
-  }
-  
-
-}
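
The class comment above describes the lock protocol in prose; the essential
moves are that creating the lock file with overwrite disabled is the atomic
acquire, and that after acquiring, the holder checks for the opposing lock and
backs out if it exists. A minimal sketch of the write-lock side against a
Hadoop FileSystem, with illustrative names and a plain IOException standing in
for Slider's LockAcquireFailedException:

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class WriteLockSketch {

      private WriteLockSketch() {
      }

      /** Acquire the write lock; fail if it, or the read lock, is held. */
      public static void acquire(FileSystem fs, Path writeLock, Path readLock)
          throws IOException {
        try (FSDataOutputStream out = fs.create(writeLock, false)) {
          // create(path, overwrite=false) fails if the lock file exists,
          // which is the atomic "acquire" step
          out.writeBytes(Long.toString(System.currentTimeMillis()));
        } catch (IOException e) {
          throw new IOException("Failed to acquire write lock " + writeLock, e);
        }
        // lock acquired, but the opposing lock must not exist
        if (fs.exists(readLock)) {
          fs.delete(writeLock, false);
          throw new IOException("Read lock held: " + readLock);
        }
      }

      /** Release the write lock; true if a lock file was actually deleted. */
      public static boolean release(FileSystem fs, Path writeLock)
          throws IOException {
        return fs.delete(writeLock, false);
      }
    }

The reader side is symmetric, except that failure to create the read-lock file
is treated as "another reader already holds the shared lock" rather than as an
error, which is where the small release race described in the class comment
comes from.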

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java
deleted file mode 100644
index 8271ef1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/ConfTreeSerDeser.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import org.apache.slider.core.conf.ConfTree;
-import org.codehaus.jackson.JsonGenerationException;
-import org.codehaus.jackson.JsonParseException;
-import org.codehaus.jackson.map.JsonMappingException;
-
-import java.io.IOException;
-
-/**
- * Conf tree to JSON binding
- */
-public class ConfTreeSerDeser extends JsonSerDeser<ConfTree> {
-  public ConfTreeSerDeser() {
-    super(ConfTree.class);
-  }
-
-
-  private static final ConfTreeSerDeser staticinstance = new ConfTreeSerDeser();
-
-  /**
-   * Convert a tree instance to a JSON string -sync access to a shared ser/deser
-   * object instance
-   * @param instance object to convert
-   * @return a JSON string description
-   * @throws JsonParseException parse problems
-   * @throws JsonMappingException O/J mapping problems
-   */
-  public static String toString(ConfTree instance) throws IOException,
-                                                          JsonGenerationException,
-                                                          JsonMappingException {
-    synchronized (staticinstance) {
-      return staticinstance.toJson(instance);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java
deleted file mode 100644
index da58520..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/persist/LockAcquireFailedException.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.core.persist;
-
-import org.apache.hadoop.fs.Path;
-
-public class LockAcquireFailedException extends Exception {
-  
-  private final Path path;
-
-  public LockAcquireFailedException(Path path) {
-    super("Failed to acquire lock " +path);
-    this.path = path;
-  }
-
-  public LockAcquireFailedException(Path path, Throwable cause) {
-    super("Failed to acquire lock " + path, cause);
-    this.path = path;
-  }
-
-  public Path getPath() {
-    return path;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
index 42e103a..df174f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/AbstractClientProvider.java
@@ -20,43 +20,22 @@ package org.apache.slider.providers;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.conf.MapOperations;
-import org.apache.slider.core.exceptions.BadClusterStateException;
 import org.apache.slider.core.exceptions.SliderException;
-import org.apache.slider.core.launch.AbstractLauncher;
 import org.codehaus.jettison.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
-import static org.apache.slider.api.ResourceKeys.COMPONENT_INSTANCES;
-import static org.apache.slider.api.ResourceKeys.DEF_YARN_CORES;
-import static org.apache.slider.api.ResourceKeys.DEF_YARN_MEMORY;
-import static org.apache.slider.api.ResourceKeys.YARN_CORES;
-import static org.apache.slider.api.ResourceKeys.YARN_MEMORY;
-
 public abstract class AbstractClientProvider extends Configured {
   private static final Logger log =
     LoggerFactory.getLogger(AbstractClientProvider.class);
-  protected static final ProviderUtils providerUtils =
-    new ProviderUtils(log);
-
-  public static final String PROVIDER_RESOURCE_BASE =
-    "org/apache/slider/providers/";
-  public static final String PROVIDER_RESOURCE_BASE_ROOT =
-    "/" + PROVIDER_RESOURCE_BASE;
 
   public AbstractClientProvider(Configuration conf) {
     super(conf);
@@ -67,150 +46,6 @@ public abstract class AbstractClientProvider extends Configured {
   public abstract List<ProviderRole> getRoles();
 
   /**
-   * Verify that an instance definition is considered valid by the provider
-   * @param instanceDefinition instance definition
-   * @throws SliderException if the configuration is not valid
-   */
-  public void validateInstanceDefinition(AggregateConf instanceDefinition, SliderFileSystem fs) throws
-      SliderException {
-
-    List<ProviderRole> roles = getRoles();
-    ConfTreeOperations resources =
-      instanceDefinition.getResourceOperations();
-    for (ProviderRole role : roles) {
-      String name = role.name;
-      MapOperations component = resources.getComponent(role.group);
-      if (component != null) {
-        String instances = component.get(COMPONENT_INSTANCES);
-        if (instances == null) {
-          String message = "No instance count provided for " + name;
-          log.error("{} with \n{}", message, resources.toString());
-          throw new BadClusterStateException(message);
-        }
-        String ram = component.get(YARN_MEMORY);
-        String cores = component.get(YARN_CORES);
-
-
-        providerUtils.getRoleResourceRequirement(ram,
-                                                 DEF_YARN_MEMORY,
-                                                 Integer.MAX_VALUE);
-        providerUtils.getRoleResourceRequirement(cores,
-                                                 DEF_YARN_CORES,
-                                                 Integer.MAX_VALUE);
-      }
-    }
-  }
-
-
-  /**
-   * Prepare the AM settings for launch
-   * @param fileSystem filesystem
-   * @param serviceConf configuration of the client
-   * @param launcher launcher to set up
-   * @param instanceDescription instance description being launched
-   * @param snapshotConfDirPath
-   * @param generatedConfDirPath
-   * @param clientConfExtras
-   * @param libdir
-   * @param tempPath
-   * @param miniClusterTestRun flag set to true on a mini cluster run
-   * @throws IOException
-   * @throws SliderException
-   */
-  public void prepareAMAndConfigForLaunch(SliderFileSystem fileSystem,
-      Configuration serviceConf,
-      AbstractLauncher launcher,
-      AggregateConf instanceDescription,
-      Path snapshotConfDirPath,
-      Path generatedConfDirPath,
-      Configuration clientConfExtras,
-      String libdir,
-      Path tempPath,
-      boolean miniClusterTestRun)
-    throws IOException, SliderException {
-    
-  }
-  
-  /**
-   * Load in and merge in templates. Null arguments means "no such template"
-   * @param instanceConf instance to patch 
-   * @param internalTemplate patch to internal.json
-   * @param resourceTemplate path to resources.json
-   * @param appConfTemplate path to app_conf.json
-   * @throws IOException any IO problems
-   */
-  protected void mergeTemplates(AggregateConf instanceConf,
-                                String internalTemplate,
-                                String resourceTemplate,
-                                String appConfTemplate) throws IOException {
-    if (internalTemplate != null) {
-      ConfTreeOperations template =
-        ConfTreeOperations.fromResource(internalTemplate);
-      instanceConf.getInternalOperations()
-                  .mergeWithoutOverwrite(template.confTree);
-    }
-
-    if (resourceTemplate != null) {
-      ConfTreeOperations resTemplate =
-        ConfTreeOperations.fromResource(resourceTemplate);
-      instanceConf.getResourceOperations()
-                   .mergeWithoutOverwrite(resTemplate.confTree);
-    }
-   
-    if (appConfTemplate != null) {
-      ConfTreeOperations template =
-        ConfTreeOperations.fromResource(appConfTemplate);
-      instanceConf.getAppConfOperations()
-                   .mergeWithoutOverwrite(template.confTree);
-    }
-    
-  }
-
-  /**
-   * This is called pre-launch to validate that the cluster specification
-   * is valid. This can include checking that the security options
-   * are in the site files prior to launch, that there are no conflicting operations
-   * etc.
-   *
-   * This check is made prior to every launch of the cluster -so can 
-   * pick up problems which manually edited cluster files have added,
-   * or from specification files from previous versions.
-   *
-   * The provider MUST NOT change the remote specification. This is
-   * purely a pre-launch validation of options.
-   *
-   *
-   * @param sliderFileSystem filesystem
-   * @param clustername name of the cluster
-   * @param configuration cluster configuration
-   * @param instanceDefinition cluster specification
-   * @param clusterDirPath directory of the cluster
-   * @param generatedConfDirPath path to place generated artifacts
-   * @param secure flag to indicate that the cluster is secure
-   * @throws SliderException on any validation issue
-   * @throws IOException on any IO problem
-   */
-  public void preflightValidateClusterConfiguration(SliderFileSystem sliderFileSystem,
-                                                      String clustername,
-                                                      Configuration configuration,
-                                                      AggregateConf instanceDefinition,
-                                                      Path clusterDirPath,
-                                                      Path generatedConfDirPath,
-                                                      boolean secure)
-      throws SliderException, IOException {
-    validateInstanceDefinition(instanceDefinition, sliderFileSystem);
-  }
-
-  /**
-   * Return a set of application specific string tags.
-   * @return the set of tags.
-   */
-  public Set<String> getApplicationTags(SliderFileSystem fileSystem,
-      ConfTreeOperations appConf, String appName) throws SliderException {
-    return Collections.emptySet();
-  }
-
-  /**
    * Generates a fixed format of application tags given one or more of
    * application name, version and description. This allows subsequent query for
    * an application with a name only, version only or description only or any

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
index c80de7f..c31b2ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java
@@ -18,29 +18,18 @@
 
 package org.apache.slider.providers;
 
-import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.resource.Application;
 import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.core.conf.AggregateConf;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.launch.ContainerLauncher;
-import org.apache.slider.core.main.ExitCodeProvider;
-import org.apache.slider.server.appmaster.actions.QueueAccess;
-import org.apache.slider.server.appmaster.operations.RMOperationHandlerActions;
-import org.apache.slider.server.appmaster.state.ContainerReleaseSelector;
 import org.apache.slider.server.appmaster.state.StateAccessForProviders;
 import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders;
 
-import java.io.File;
 import java.io.IOException;
-import java.net.URL;
-import java.util.List;
-import java.util.Map;
 
 public interface ProviderService extends Service {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
index 8b88c28..1d5d8a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerClientProvider.java
@@ -19,10 +19,6 @@ package org.apache.slider.providers.docker;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.tools.SliderFileSystem;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTreeOperations;
-import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.providers.AbstractClientProvider;
 import org.apache.slider.providers.ProviderRole;
 import org.apache.slider.providers.ProviderUtils;
@@ -31,7 +27,6 @@ import org.slf4j.LoggerFactory;
 
 import java.util.Collections;
 import java.util.List;
-import java.util.Set;
 
 public class DockerClientProvider extends AbstractClientProvider
     implements SliderKeys {
@@ -55,17 +50,4 @@ public class DockerClientProvider extends AbstractClientProvider
     return Collections.emptyList();
   }
 
-  @Override
-  public void validateInstanceDefinition(AggregateConf instanceDefinition,
-      SliderFileSystem fs) throws SliderException {
-    super.validateInstanceDefinition(instanceDefinition, fs);
-    //TODO validate Application payload, part of that is already done in ApplicationApiService, need to do more
-  }
-
-  @Override
-  public Set<String> getApplicationTags(SliderFileSystem fileSystem,
-      ConfTreeOperations appConf, String appName) throws SliderException {
-    return createApplicationTags(appName, null, null);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 4fa2769..eca07e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
@@ -76,7 +75,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.WebAppException;
 import org.apache.hadoop.yarn.webapp.WebApps;
@@ -98,7 +96,6 @@ import org.apache.slider.common.tools.PortScanner;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.conf.AggregateConf;
 import org.apache.slider.core.conf.MapOperations;
 import org.apache.slider.core.exceptions.BadConfigException;
 import org.apache.slider.core.exceptions.SliderException;
@@ -137,7 +134,6 @@ import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
 import org.apache.slider.server.appmaster.operations.AsyncRMOperationHandler;
 import org.apache.slider.server.appmaster.operations.RMOperationHandler;
 import org.apache.slider.server.appmaster.rpc.RpcBinder;
-import org.apache.slider.server.appmaster.rpc.SliderAMPolicyProvider;
 import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPBImpl;
 import org.apache.slider.server.appmaster.rpc.SliderIPCService;
 import org.apache.slider.server.appmaster.security.SecurityConfiguration;
@@ -384,13 +380,10 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * added as a child and inited in {@link #serviceInit(Configuration)}
    */
   private final QueueService actionQueues = new QueueService();
-  private String agentOpsUrl;
-  private String agentStatusUrl;
   private YarnRegistryViewForProviders yarnRegistryOperations;
   //private FsDelegationTokenManager fsDelegationTokenManager;
   private RegisterApplicationMasterResponse amRegistrationData;
   private PortScanner portScanner;
-  private SecurityConfiguration securityConfiguration;
 
   /**
    * Is security enabled?
@@ -752,31 +745,31 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
       // the max value as part of its lookup
       rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maximumResourceCapability);
 
-//      processAMCredentials(securityConfiguration);
-
-      if (securityEnabled) {
-        secretManager.setMasterKey(
-            amRegistrationData.getClientToAMTokenMasterKey().array());
-        applicationACLs = amRegistrationData.getApplicationACLs();
-
-        //tell the server what the ACLs are
-        rpcService.getServer().refreshServiceAcl(serviceConf,
-            new SliderAMPolicyProvider());
-        if (securityConfiguration.isKeytabProvided()) {
-          // perform keytab based login to establish kerberos authenticated
-          // principal.  Can do so now since AM registration with RM above required
-          // tokens associated to principal
-          String principal = securityConfiguration.getPrincipal();
-          //TODO read key tab file from slider-am.xml
-          File localKeytabFile =
-              securityConfiguration.getKeytabFile(new AggregateConf());
-          // Now log in...
-          login(principal, localKeytabFile);
-          // obtain new FS reference that should be kerberos based and different
-          // than the previously cached reference
-          fs = new SliderFileSystem(serviceConf);
-        }
-      }
+      stripAMRMToken();
+
+//      if (securityEnabled) {
+//        secretManager.setMasterKey(
+//            amRegistrationData.getClientToAMTokenMasterKey().array());
+//        applicationACLs = amRegistrationData.getApplicationACLs();
+//
+//        //tell the server what the ACLs are
+//        rpcService.getServer().refreshServiceAcl(serviceConf,
+//            new SliderAMPolicyProvider());
+//        if (securityConfiguration.isKeytabProvided()) {
+//          // perform keytab based login to establish kerberos authenticated
+//          // principal.  Can do so now since AM registration with RM above required
+//          // tokens associated to principal
+//          String principal = securityConfiguration.getPrincipal();
+//          //TODO read key tab file from slider-am.xml
+//          File localKeytabFile = new File("todo");
+////              securityConfiguration.getKeytabFile(new AggregateConf());
+//          // Now log in...
+//          login(principal, localKeytabFile);
+//          // obtain new FS reference that should be kerberos based and different
+//          // than the previously cached reference
+//          fs = new SliderFileSystem(serviceConf);
+//        }
+//      }
 
       // YARN client.
       // Important: this is only valid at startup, and must be executed within
@@ -1010,22 +1003,12 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
    * Process the initial user to obtain the set of user
    * supplied credentials (tokens were passed in by client).
    * Removes the AM/RM token.
-   * If a keytab has been provided, also strip the HDFS delegation token.
-   * @param securityConfig slider security config
    * @throws IOException
    */
-  private void processAMCredentials(SecurityConfiguration securityConfig)
+  private void stripAMRMToken()
       throws IOException {
-
     List<Text> filteredTokens = new ArrayList<>(3);
     filteredTokens.add(AMRMTokenIdentifier.KIND_NAME);
-    filteredTokens.add(TimelineDelegationTokenIdentifier.KIND_NAME);
-
-    boolean keytabProvided = securityConfig.isKeytabProvided();
-    log.info("Slider AM Security Mode: {}", keytabProvided ? "KEYTAB" : "TOKEN");
-    if (keytabProvided) {
-      filteredTokens.add(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-    }
     containerCredentials = CredentialUtils.filterTokens(
         UserGroupInformation.getCurrentUser().getCredentials(),
         filteredTokens);
@@ -1946,24 +1929,6 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
     nmClientAsync.startContainerAsync(container, ctx);
   }
 
-  /**
-   * Build the credentials needed for containers. This will include
-   * getting new delegation tokens for HDFS if the AM is running
-   * with a keytab.
-   * @return a buffer of credentials
-   * @throws IOException
-   */
-
-  private Credentials buildContainerCredentials() throws IOException {
-    Credentials credentials = new Credentials(containerCredentials);
-    if (securityConfiguration.isKeytabProvided()) {
-      CredentialUtils.addSelfRenewableFSDelegationTokens(
-          getClusterFS().getFileSystem(),
-          credentials);
-    }
-    return credentials;
-  }
-
   @Override //  NMClientAsync.CallbackHandler 
   public void onContainerStopped(ContainerId containerId) {
     // do nothing but log: container events from the AM
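
The new stripAMRMToken() above delegates to CredentialUtils.filterTokens, whose
body is outside this diff. As a hypothetical sketch of what such a filter does,
using only standard Hadoop security APIs (class and method names here are
illustrative, not the Slider utility):

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Collections;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;
    import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

    public final class TokenFilterSketch {

      private TokenFilterSketch() {
      }

      /** Copy credentials, keeping only tokens whose kind is not excluded. */
      public static Credentials filterTokens(Credentials source,
          Collection<Text> excludedKinds) {
        Credentials filtered = new Credentials();
        for (Token<? extends TokenIdentifier> token : source.getAllTokens()) {
          if (!excludedKinds.contains(token.getKind())) {
            // keyed by service here; the real utility may preserve aliases
            filtered.addToken(token.getService(), token);
          }
        }
        return filtered;
      }

      /** Example: drop the AM/RM token from the current user's credentials. */
      public static Credentials stripAmRmToken() throws IOException {
        Credentials current =
            UserGroupInformation.getCurrentUser().getCredentials();
        return filterTokens(current,
            Collections.singletonList(AMRMTokenIdentifier.KIND_NAME));
      }
    }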

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
index a660958..220f2ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
@@ -19,7 +19,6 @@
 package org.apache.slider.server.appmaster.actions;
 
 import org.apache.slider.api.proto.Messages;
-import org.apache.slider.core.conf.ConfTree;
 import org.apache.slider.server.appmaster.SliderAppMaster;
 import org.apache.slider.server.appmaster.state.AppState;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
index 4d483c7..7830a1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolPBImpl.java
@@ -222,73 +222,4 @@ public class SliderClusterProtocolPBImpl implements SliderClusterProtocolPB {
       throw wrap(e);
     }
   }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesired(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelDesired(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredAppconf(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelDesiredAppconf(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredResources(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelDesiredResources(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolved(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelResolved(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedAppconf(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelResolvedAppconf(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedResources(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getModelResolvedResources(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getLiveResources(RpcController controller,
-      Messages.EmptyPayloadProto request) throws ServiceException {
-    try {
-      return real.getLiveResources(request);
-    } catch (Exception e) {
-      throw wrap(e);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
index c60d609..1902ec1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderClusterProtocolProxy.java
@@ -267,67 +267,4 @@ public class SliderClusterProtocolProxy implements SliderClusterProtocol {
       throw convert(e);
     }
   }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelDesired(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelDesiredAppconf(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelDesiredResources(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelResolved(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelResolvedAppconf(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getModelResolvedResources(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException {
-    try {
-      return endpoint.getLiveResources(NULL_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw convert(e);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
index 344495b..eaa0a81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/SliderIPCService.java
@@ -32,12 +32,8 @@ import org.apache.slider.api.types.ComponentInformation;
 import org.apache.slider.api.types.ContainerInformation;
 import org.apache.slider.api.types.NodeInformation;
 import org.apache.slider.api.types.NodeInformationList;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
 import org.apache.slider.core.exceptions.ServiceNotReadyException;
 import org.apache.slider.core.main.LauncherExitCodes;
-import org.apache.slider.core.persist.AggregateConfSerDeser;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
 import org.apache.slider.core.persist.JsonSerDeser;
 import org.apache.slider.server.appmaster.AppMasterActionOperations;
 import org.apache.slider.server.appmaster.actions.ActionFlexCluster;
@@ -401,70 +397,6 @@ public class SliderIPCService extends AbstractService
     }
   }
 
-  @Override
-  public Messages.WrappedJsonProto getModelDesired(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupAggregateConf(MODEL_DESIRED);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredAppconf(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupConfTree(MODEL_DESIRED_APPCONF);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelDesiredResources(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupConfTree(MODEL_DESIRED_RESOURCES);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolved(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupAggregateConf(MODEL_RESOLVED);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedAppconf(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupConfTree(MODEL_RESOLVED_APPCONF);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getModelResolvedResources(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupConfTree(MODEL_RESOLVED_RESOURCES);
-  }
-
-  @Override
-  public Messages.WrappedJsonProto getLiveResources(Messages.EmptyPayloadProto request) throws IOException {
-    return lookupConfTree(LIVE_RESOURCES);
-  }
-
-  /**
-   * Helper method; look up an aggregate configuration in the cache from
-   * a key, or raise an exception
-   * @param key key to resolve
-   * @return the configuration
-   * @throws IOException on a failure
-   */
-
-  protected Messages.WrappedJsonProto lookupAggregateConf(String key) throws
-      IOException {
-    AggregateConf aggregateConf = (AggregateConf) cache.lookupWithIOE(key);
-    String json = AggregateConfSerDeser.toString(aggregateConf);
-    return wrap(json);
-  }
-
-  /**
-   * Helper method; look up a conf tree in the cache from
-   * a key, or raise an exception
-   * @param key key to resolve
-   * @return the configuration
-   * @throws IOException on a failure
-   */
-  protected Messages.WrappedJsonProto lookupConfTree(String key) throws
-      IOException {
-    ConfTree conf = (ConfTree) cache.lookupWithIOE(key);
-    String json = ConfTreeSerDeser.toString(conf);
-    return wrap(json);
-  }
-
   private Messages.WrappedJsonProto wrap(String json) {
     Messages.WrappedJsonProto.Builder builder =
         Messages.WrappedJsonProto.newBuilder();
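
The two helpers deleted above both followed a lookup-then-serialize pattern: fetch a cached model object by key, render it to JSON with the matching SerDeser, and hand the string to wrap(). A rough, self-contained sketch of that flow, with a plain Map standing in for the AM's content cache and a trivial serializer in place of AggregateConfSerDeser/ConfTreeSerDeser:

import java.io.IOException;
import java.util.Map;

public class ModelLookupSketch {
  private final Map<String, Object> cache;   // stand-in for the content cache

  public ModelLookupSketch(Map<String, Object> cache) {
    this.cache = cache;
  }

  // Mirrors lookupAggregateConf/lookupConfTree: resolve by key or fail with
  // an IOException, then serialize the model to JSON for the wire response.
  public String lookupAsJson(String key) throws IOException {
    Object model = cache.get(key);
    if (model == null) {
      throw new IOException("No cached model under key " + key);
    }
    return toJson(model);
  }

  // Placeholder for the SerDeser classes used in the removed code.
  private String toJson(Object model) {
    return "{\"model\": \"" + model + "\"}";
  }
}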

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
index b31babc..37c730f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
@@ -16,21 +16,9 @@
  */
 package org.apache.slider.server.appmaster.security;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED;
-import org.apache.slider.common.SliderKeys;
-import org.apache.slider.common.SliderXmlConfKeys;
-import org.apache.slider.common.tools.SliderUtils;
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.exceptions.SliderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-
 /**
  * Class keeping code security information
  */
@@ -38,126 +26,111 @@ public class SecurityConfiguration {
 
   protected static final Logger log =
       LoggerFactory.getLogger(SecurityConfiguration.class);
-  private final Configuration configuration;
-  private final AggregateConf instanceDefinition;
   private String clusterName;
 
-  public SecurityConfiguration(Configuration configuration,
-                               AggregateConf instanceDefinition,
-                               String clusterName) throws SliderException {
-    Preconditions.checkNotNull(configuration);
-    Preconditions.checkNotNull(instanceDefinition);
-    Preconditions.checkNotNull(clusterName);
-    this.configuration = configuration;
-    this.instanceDefinition = instanceDefinition;
-    this.clusterName = clusterName;
-    validate();
-  }
-
-  private void validate() throws SliderException {
-    if (isSecurityEnabled()) {
-      String principal = instanceDefinition.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-      if(SliderUtils.isUnset(principal)) {
-        // if no login identity is available, fail
-        UserGroupInformation loginUser = null;
-        try {
-          loginUser = getLoginUser();
-        } catch (IOException e) {
-          throw new SliderException(EXIT_UNAUTHORIZED, e,
-                                    "No principal configured for the application and "
-                                    + "exception raised during retrieval of login user. "
-                                    + "Unable to proceed with application "
-                                    + "initialization.  Please ensure a value "
-                                    + "for %s exists in the application "
-                                    + "configuration or the login issue is addressed",
-                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-        }
-        if (loginUser == null) {
-          throw new SliderException(EXIT_UNAUTHORIZED,
-                                    "No principal configured for the application "
-                                    + "and no login user found. "
-                                    + "Unable to proceed with application "
-                                    + "initialization.  Please ensure a value "
-                                    + "for %s exists in the application "
-                                    + "configuration or the login issue is addressed",
-                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-        }
-      }
-      // ensure that either local or distributed keytab mechanism is enabled,
-      // but not both
-      String keytabFullPath = instanceDefinition.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM)
-          .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-      String keytabName = instanceDefinition.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM)
-          .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
-        throw new SliderException(EXIT_UNAUTHORIZED,
-                                  "Both a keytab on the cluster host (%s) and a"
-                                  + " keytab to be retrieved from HDFS (%s) are"
-                                  + " specified.  Please configure only one keytab"
-                                  + " retrieval mechanism.",
-                                  SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
-                                  SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-
-      }
-    }
-  }
-
-  protected UserGroupInformation getLoginUser() throws IOException {
-    return UserGroupInformation.getLoginUser();
-  }
-
-  public boolean isSecurityEnabled () {
-    return SliderUtils.isHadoopClusterSecure(configuration);
-  }
-
-  public String getPrincipal () throws IOException {
-    String principal = instanceDefinition.getAppConfOperations()
-        .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
-    if (SliderUtils.isUnset(principal)) {
-      principal = UserGroupInformation.getLoginUser().getShortUserName();
-      log.info("No principal set in the slider configuration.  Will use AM login"
-               + " identity {} to attempt keytab-based login", principal);
-    }
-
-    return principal;
-  }
-
-  public boolean isKeytabProvided() {
-    boolean keytabProvided = instanceDefinition.getAppConfOperations()
-                    .getComponent(SliderKeys.COMPONENT_AM)
-                    .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
-                instanceDefinition.getAppConfOperations()
-                    .getComponent(SliderKeys.COMPONENT_AM).
-                    get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
-    return keytabProvided;
-
-  }
-
-  public File getKeytabFile(AggregateConf instanceDefinition)
-      throws SliderException, IOException {
-    //TODO implement this for dash semantic
-    String keytabFullPath = instanceDefinition.getAppConfOperations()
-        .getComponent(SliderKeys.COMPONENT_AM)
-        .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
-    File localKeytabFile;
-    if (SliderUtils.isUnset(keytabFullPath)) {
-      // get the keytab
-      String keytabName = instanceDefinition.getAppConfOperations()
-          .getComponent(SliderKeys.COMPONENT_AM).
-              get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
-      log.info("No host keytab file path specified. Will attempt to retrieve"
-               + " keytab file {} as a local resource for the container",
-               keytabName);
-      // download keytab to local, protected directory
-      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
-    } else {
-      log.info("Using host keytab file {} for login", keytabFullPath);
-      localKeytabFile = new File(keytabFullPath);
-    }
-    return localKeytabFile;
-  }
-
+//  private void validate() throws SliderException {
+//    if (isSecurityEnabled()) {
+//      String principal = instanceDefinition.getAppConfOperations()
+//          .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+//      if(SliderUtils.isUnset(principal)) {
+//        // if no login identity is available, fail
+//        UserGroupInformation loginUser = null;
+//        try {
+//          loginUser = getLoginUser();
+//        } catch (IOException e) {
+//          throw new SliderException(EXIT_UNAUTHORIZED, e,
+//                                    "No principal configured for the application and "
+//                                    + "exception raised during retrieval of login user. "
+//                                    + "Unable to proceed with application "
+//                                    + "initialization.  Please ensure a value "
+//                                    + "for %s exists in the application "
+//                                    + "configuration or the login issue is addressed",
+//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+//        }
+//        if (loginUser == null) {
+//          throw new SliderException(EXIT_UNAUTHORIZED,
+//                                    "No principal configured for the application "
+//                                    + "and no login user found. "
+//                                    + "Unable to proceed with application "
+//                                    + "initialization.  Please ensure a value "
+//                                    + "for %s exists in the application "
+//                                    + "configuration or the login issue is addressed",
+//                                    SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+//        }
+//      }
+//      // ensure that either local or distributed keytab mechanism is enabled,
+//      // but not both
+//      String keytabFullPath = instanceDefinition.getAppConfOperations()
+//          .getComponent(SliderKeys.COMPONENT_AM)
+//          .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+//      String keytabName = instanceDefinition.getAppConfOperations()
+//          .getComponent(SliderKeys.COMPONENT_AM)
+//          .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+//      if (SliderUtils.isSet(keytabFullPath) && SliderUtils.isSet(keytabName)) {
+//        throw new SliderException(EXIT_UNAUTHORIZED,
+//                                  "Both a keytab on the cluster host (%s) and a"
+//                                  + " keytab to be retrieved from HDFS (%s) are"
+//                                  + " specified.  Please configure only one keytab"
+//                                  + " retrieval mechanism.",
+//                                  SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH,
+//                                  SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+//
+//      }
+//    }
+//  }
+//
+//  protected UserGroupInformation getLoginUser() throws IOException {
+//    return UserGroupInformation.getLoginUser();
+//  }
+//
+//  public boolean isSecurityEnabled () {
+//    return SliderUtils.isHadoopClusterSecure(configuration);
+//  }
+//
+//  public String getPrincipal () throws IOException {
+//    String principal = instanceDefinition.getAppConfOperations()
+//        .getComponent(SliderKeys.COMPONENT_AM).get(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL);
+//    if (SliderUtils.isUnset(principal)) {
+//      principal = UserGroupInformation.getLoginUser().getShortUserName();
+//      log.info("No principal set in the slider configuration.  Will use AM login"
+//               + " identity {} to attempt keytab-based login", principal);
+//    }
+//
+//    return principal;
+//  }
+//
+//  public boolean isKeytabProvided() {
+//    boolean keytabProvided = instanceDefinition.getAppConfOperations()
+//                    .getComponent(SliderKeys.COMPONENT_AM)
+//                    .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
+//                instanceDefinition.getAppConfOperations()
+//                    .getComponent(SliderKeys.COMPONENT_AM).
+//                    get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
+//    return keytabProvided;
+//
+//  }
+//
+//  public File getKeytabFile(AggregateConf instanceDefinition)
+//      throws SliderException, IOException {
+//    //TODO implement this for dash semantic
+//    String keytabFullPath = instanceDefinition.getAppConfOperations()
+//        .getComponent(SliderKeys.COMPONENT_AM)
+//        .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+//    File localKeytabFile;
+//    if (SliderUtils.isUnset(keytabFullPath)) {
+//      // get the keytab
+//      String keytabName = instanceDefinition.getAppConfOperations()
+//          .getComponent(SliderKeys.COMPONENT_AM).
+//              get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+//      log.info("No host keytab file path specified. Will attempt to retrieve"
+//               + " keytab file {} as a local resource for the container",
+//               keytabName);
+//      // download keytab to local, protected directory
+//      localKeytabFile = new File(SliderKeys.KEYTAB_DIR, keytabName);
+//    } else {
+//      log.info("Using host keytab file {} for login", keytabFullPath);
+//      localKeytabFile = new File(keytabFullPath);
+//    }
+//    return localKeytabFile;
+//  }
 }
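
Reconstructed from the hunk above, the class that survives this patch is little more than a shell: the keytab and principal checks are parked in block comments rather than deleted outright, presumably pending further rework. A sketch of its effective contents after the change:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SecurityConfiguration {

  protected static final Logger log =
      LoggerFactory.getLogger(SecurityConfiguration.class);
  private String clusterName;

  // validate(), getLoginUser(), isSecurityEnabled(), getPrincipal(),
  // isKeytabProvided() and getKeytabFile() remain only as commented-out code
  // in this revision.
}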

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 84b8140..3d73f3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -35,7 +34,6 @@ import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.slider.api.ClusterDescription;
 import org.apache.slider.api.ClusterNode;
 import org.apache.slider.api.InternalKeys;
 import org.apache.slider.api.StatusKeys;
@@ -82,7 +80,6 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.apache.slider.api.ResourceKeys.*;
 import static org.apache.slider.api.StateValues.*;
 import static org.apache.slider.api.resource.ApplicationState.STARTED;
@@ -110,12 +107,6 @@ public class AppState {
 
   private Application app;
 
-
-  /**
-   * This is a template of the cluster status
-   */
-  private ClusterDescription clusterStatusTemplate = new ClusterDescription();
-
   private final Map<Integer, RoleStatus> roleStatusMap =
     new ConcurrentSkipListMap<>();
 
@@ -1325,59 +1316,6 @@ public class AppState {
     //TODO build container stats
     app.setState(ApplicationState.STARTED);
     return app;
-/*
-    return app;
-
-    ClusterDescription cd = getClusterStatus();
-    long now = now();
-    cd.setInfoTime(StatusKeys.INFO_STATUS_TIME_HUMAN,
-                   StatusKeys.INFO_STATUS_TIME_MILLIS,
-                   now);
-
-    MapOperations infoOps = new MapOperations("info", cd.info);
-    infoOps.mergeWithoutOverwrite(applicationInfo);
-    SliderUtils.addBuildInfo(infoOps, "status");
-    cd.statistics = new HashMap<>();
-
-    // build the map of node -> container IDs
-    Map<String, List<String>> instanceMap = createRoleToInstanceMap();
-    cd.instances = instanceMap;
-    
-    //build the map of node -> containers
-    Map<String, Map<String, ClusterNode>> clusterNodes =
-      createRoleToClusterNodeMap();
-    log.info("app state clusterNodes {} ", clusterNodes.toString());
-    cd.status = new HashMap<>();
-    cd.status.put(ClusterDescriptionKeys.KEY_CLUSTER_LIVE, clusterNodes);
-
-    for (RoleStatus role : getRoleStatusMap().values()) {
-      String rolename = role.getName();
-      List<String> instances = instanceMap.get(rolename);
-      int nodeCount = instances != null ? instances.size(): 0;
-      cd.setRoleOpt(rolename, COMPONENT_INSTANCES,
-                    role.getDesired());
-      cd.setRoleOpt(rolename, ROLE_ACTUAL_INSTANCES, nodeCount);
-      cd.setRoleOpt(rolename, ROLE_REQUESTED_INSTANCES, role.getRequested());
-      cd.setRoleOpt(rolename, ROLE_RELEASING_INSTANCES, role.getReleasing());
-      cd.setRoleOpt(rolename, ROLE_FAILED_INSTANCES, role.getFailed());
-      cd.setRoleOpt(rolename, ROLE_FAILED_STARTING_INSTANCES, role.getStartFailed());
-      cd.setRoleOpt(rolename, ROLE_FAILED_RECENTLY_INSTANCES, role.getFailedRecently());
-      cd.setRoleOpt(rolename, ROLE_NODE_FAILED_INSTANCES, role.getNodeFailed());
-      cd.setRoleOpt(rolename, ROLE_PREEMPTED_INSTANCES, role.getPreempted());
-      if (role.isAntiAffinePlacement()) {
-        cd.setRoleOpt(rolename, ROLE_PENDING_AA_INSTANCES, role.getPendingAntiAffineRequests());
-      }
-      Map<String, Integer> stats = role.buildStatistics();
-      cd.statistics.put(rolename, stats);
-    }
-
-    Map<String, Integer> sliderstats = getLiveStatistics();
-    cd.statistics.put(SliderKeys.COMPONENT_AM, sliderstats);
-
-    // liveness
-    cd.liveness = getApplicationLivenessInformation();
-
-    return cd;*/
   }
 
   /**
@@ -1390,7 +1328,6 @@ public class AppState {
     int outstanding = (int)(stats.desired - stats.actual);
     li.requestsOutstanding = outstanding;
     li.allRequestsSatisfied = outstanding <= 0;
-    li.activeRequests = (int)stats.requested;
     return li;
   }
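
For reference, the liveness calculation the hunk above trims (only the activeRequests assignment is dropped) reduces to gap arithmetic between desired and actual container counts. A stand-alone sketch with stand-in holder classes in place of the Slider types:

public class LivenessSketch {

  static final class Stats {
    long desired;
    long actual;
  }

  static final class LivenessInformation {
    int requestsOutstanding;
    boolean allRequestsSatisfied;
  }

  // Mirrors the surviving logic: outstanding requests are the gap between the
  // desired and actual container counts; the app is satisfied once the gap is
  // closed (or negative, i.e. more containers are running than desired).
  static LivenessInformation getLiveness(Stats stats) {
    LivenessInformation li = new LivenessInformation();
    int outstanding = (int) (stats.desired - stats.actual);
    li.requestsOutstanding = outstanding;
    li.allRequestsSatisfied = outstanding <= 0;
    return li;
  }
}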
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
index e73dd87..44259d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/AMWebServices.java
@@ -22,7 +22,6 @@ import org.apache.slider.api.resource.Application;
 import org.apache.slider.server.appmaster.web.WebAppApi;
 import org.apache.slider.server.appmaster.web.rest.application.actions.RestActionStop;
 import org.apache.slider.server.appmaster.web.rest.application.actions.StopResponse;
-import org.apache.slider.server.appmaster.web.rest.management.ManagementResource;
 import org.apache.slider.server.appmaster.web.rest.publisher.PublisherResource;
 import org.apache.slider.server.appmaster.web.rest.registry.RegistryResource;
 
@@ -46,22 +45,16 @@ public class AMWebServices {
   
   /** AM/WebApp info object */
   private WebAppApi slider;
-  private final ManagementResource managementResource;
   private final PublisherResource publisherResource;
   private final RegistryResource registryResource;
 
   @Inject
   public AMWebServices(WebAppApi slider) {
     this.slider = slider;
-    managementResource = new ManagementResource(slider);
     publisherResource = new PublisherResource(slider);
     registryResource = new RegistryResource(slider);
   }
-
-  @Path(RestPaths.SLIDER_SUBPATH_MANAGEMENT)
-  public ManagementResource getManagementResource() {
-    return managementResource;
-  }
+  //TODO add an endpoint for exposing configs
 
   @Path(RestPaths.SLIDER_SUBPATH_PUBLISHER)
   public PublisherResource getPublisherResource() {
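
The getManagementResource() method removed above was a JAX-RS sub-resource locator, the same pattern the remaining publisher and registry getters use: a @Path-annotated method with no HTTP verb returns a resource object and JAX-RS continues matching the request against it. A minimal sketch of the pattern; the paths and types here are illustrative, not the real RestPaths constants:

import javax.ws.rs.GET;
import javax.ws.rs.Path;

@Path("/ws/v1/slider")                  // illustrative root path
public class RootResourceSketch {

  private final PublisherSketch publisherResource = new PublisherSketch();

  // Sub-resource locator: no @GET/@POST here, just @Path. JAX-RS calls this
  // method and keeps dispatching the request against the returned object.
  @Path("publisher")
  public PublisherSketch getPublisherResource() {
    return publisherResource;
  }

  public static class PublisherSketch {
    @GET
    public String get() {
      return "published configuration";
    }
  }
}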

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
deleted file mode 100644
index 261e66e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AggregateModelRefresher.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-/**
- * Refresh the aggregate desired model.
- */
-public class AggregateModelRefresher
-    implements ResourceRefresher<AggregateConf> {
-
-  private final StateAccessForProviders state;
-  private final boolean resolved;
-
-  public AggregateModelRefresher(StateAccessForProviders state,
-      boolean resolved) {
-    this.state = state;
-    this.resolved = resolved;
-  }
-
-  @Override
-  public AggregateConf refresh() throws Exception {
-    return new AggregateConf();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
deleted file mode 100644
index 190a51e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/AppconfRefresher.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.core.conf.AggregateConf;
-import org.apache.slider.core.conf.ConfTree;
-import org.apache.slider.core.persist.ConfTreeSerDeser;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-/**
- * refresher for resources and application configuration
- */
-public class AppconfRefresher
-    implements ResourceRefresher<ConfTree> {
-
-  private final StateAccessForProviders state;
-  private final boolean unresolved;
-  private final boolean resources;
-
-  public AppconfRefresher(StateAccessForProviders state,
-      boolean unresolved,
-      boolean resources) {
-    this.state = state;
-    this.unresolved = unresolved;
-    this.resources = resources;
-  }
-
-
-  @Override
-  public ConfTree refresh() throws Exception {
-    AggregateConf aggregateConf = new AggregateConf();
-    ConfTree ct = resources ? aggregateConf.getResources() 
-                            : aggregateConf.getAppConf();
-    return new ConfTreeSerDeser().fromInstance(ct);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c82f36c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
deleted file mode 100644
index b6627a7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/application/resources/LiveComponentsRefresher.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.slider.server.appmaster.web.rest.application.resources;
-
-import org.apache.slider.api.types.ComponentInformation;
-import org.apache.slider.server.appmaster.state.StateAccessForProviders;
-
-import java.util.Map;
-
-public class LiveComponentsRefresher
-    implements ResourceRefresher<Map<String, ComponentInformation>> {
-
-  private final StateAccessForProviders state;
-
-  public LiveComponentsRefresher(StateAccessForProviders state) {
-    this.state = state;
-  }
-
-  @Override
-  public Map<String, ComponentInformation> refresh() {
-    return state.getComponentInfoSnapshot();
-  }
-}
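
The three files deleted above (AggregateModelRefresher, AppconfRefresher, LiveComponentsRefresher) were all small ResourceRefresher implementations: typed refresh() callbacks that a cache layer invokes when an entry needs rebuilding. A self-contained sketch of that pattern; the interface mirrors the one the deleted classes implemented, while the cache and its expiry policy below are simplified assumptions, not the real Slider cache:

public class RefresherSketch {

  // Mirrors the ResourceRefresher interface the deleted classes implemented.
  interface ResourceRefresher<T> {
    T refresh() throws Exception;
  }

  // Simplified stand-in for the content cache: refresh on first use or when
  // the entry is older than a fixed TTL (the real cache's policy may differ).
  static class CachedResource<T> {
    private final ResourceRefresher<T> refresher;
    private final long ttlMillis;
    private T value;
    private long loadedAt;

    CachedResource(ResourceRefresher<T> refresher, long ttlMillis) {
      this.refresher = refresher;
      this.ttlMillis = ttlMillis;
    }

    synchronized T get() throws Exception {
      long now = System.currentTimeMillis();
      if (value == null || now - loadedAt > ttlMillis) {
        value = refresher.refresh();     // e.g. a snapshot of component info
        loadedAt = now;
      }
      return value;
    }
  }

  public static void main(String[] args) throws Exception {
    CachedResource<String> live =
        new CachedResource<>(() -> "component snapshot", 30_000L);
    System.out.println(live.get());
  }
}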

