Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2017/07/25 18:03:04 UTC

[22/50] [abbrv] hadoop git commit: YARN-6335. Port slider's groovy unit tests to yarn native services. Contributed by Billie Rinaldi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
new file mode 100644
index 0000000..746a0ec
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnMiniClusterTestBase.java
@@ -0,0 +1,832 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.params.ActionFreezeArgs;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.tools.Duration;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.slider.utils.KeysForTests.*;
+
+/**
+ * Base class for mini cluster tests -creates a field for the
+ * mini YARN cluster.
+ */
+public abstract class YarnMiniClusterTestBase extends SliderTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnMiniClusterTestBase.class);
+
+  /**
+   * Mini YARN cluster only.
+   */
+  public static final int CLUSTER_GO_LIVE_TIME = 3 * 60 * 1000;
+  public static final int CLUSTER_STOP_TIME = 1 * 60 * 1000;
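+  // Note: the signal constants below are negated so that formatting the
+  // shell command "kill %d" with e.g. SIGKILL yields "kill -9"
+  // (see killJavaProcesses).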
+  public static final int SIGTERM = -15;
+  public static final int SIGKILL = -9;
+  public static final int SIGSTOP = -17;
+  public static final String NO_ARCHIVE_DEFINED = "Archive configuration " +
+      "option not set: ";
+  /**
+   * RAM for the YARN containers: {@value}.
+   */
+  public static final String YRAM = "256";
+  public static final String FIFO_SCHEDULER = "org.apache.hadoop.yarn.server" +
+      ".resourcemanager.scheduler.fifo.FifoScheduler";
+  public static final YarnConfiguration SLIDER_CONFIG =
+      SliderUtils.createConfiguration();
+  private static boolean killSupported;
+
+  static {
+    SLIDER_CONFIG.setInt(SliderXmlConfKeys.KEY_AM_RESTART_LIMIT, 1);
+    SLIDER_CONFIG.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 100);
+    SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+    SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+    SLIDER_CONFIG
+        .setBoolean(SliderXmlConfKeys.KEY_SLIDER_AM_DEPENDENCY_CHECKS_DISABLED,
+            true);
+    SLIDER_CONFIG
+        .setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 1);
+  }
+
+
+  private int thawWaitTime = DEFAULT_THAW_WAIT_TIME_SECONDS * 1000;
+  private int freezeWaitTime = DEFAULT_TEST_FREEZE_WAIT_TIME_SECONDS * 1000;
+  private int sliderTestTimeout = DEFAULT_TEST_TIMEOUT_SECONDS * 1000;
+  private boolean teardownKillall = DEFAULT_TEARDOWN_KILLALL;
+
+  /**
+   * Test timeout rule; the timeout value is read from the test
+   * configuration.
+   */
+  @Rule
+  public Timeout testTimeout = new Timeout(
+      getTimeOptionMillis(getTestConfiguration(),
+          KEY_TEST_TIMEOUT,
+          DEFAULT_TEST_TIMEOUT_SECONDS * 1000)
+  );
+  private MiniDFSCluster hdfsCluster;
+  private MiniYARNCluster miniCluster;
+  private boolean switchToImageDeploy = false;
+  private boolean imageIsRemote = false;
+  private URI remoteImageURI;
+  private List<SliderClient> clustersToTeardown = new ArrayList<>();
+  private int clusterCount = 1;
+
+  /**
+   * Client-side test: validate system env before launch.
+   */
+  @BeforeClass
+  public static void checkClientEnv() throws IOException, SliderException {
+    SliderUtils.validateSliderClientEnvironment(null);
+  }
+
+  /**
+   * Work out if kill is supported.
+   */
+  @BeforeClass
+  public static void checkKillSupport() {
+    killSupported = !Shell.WINDOWS;
+  }
+
+  protected static boolean getKillSupported() {
+    return killSupported;
+  }
+
+  protected MiniYARNCluster getMiniCluster() {
+    return miniCluster;
+  }
+
+  /**
+   * Probe for the disks being healthy in a mini cluster. Only the first
+   * NM is checked.
+   *
+   * @param miniCluster the mini cluster to probe
+   */
+  public static void assertMiniClusterDisksHealthy(
+      MiniYARNCluster miniCluster) {
+    boolean healthy = miniCluster.getNodeManager(
+        0).getNodeHealthChecker().getDiskHandler().areDisksHealthy();
+    assertTrue("Disks on test cluster unhealthy —may be full", healthy);
+  }
+
+  /**
+   * Inner work building the mini dfs cluster.
+   *
+   * @param name cluster name
+   * @param conf configuration to use
+   * @return the new mini DFS cluster
+   */
+  public static MiniDFSCluster buildMiniHDFSCluster(
+      String name,
+      YarnConfiguration conf) throws IOException {
+    assertNativeLibrariesPresent();
+
+    File baseDir = new File("./target/hdfs", name).getAbsoluteFile();
+    //use file: to rm it recursively
+    FileUtil.fullyDelete(baseDir);
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+
+    MiniDFSCluster cluster = builder.build();
+    return cluster;
+  }
+
+  public static String buildFsDefaultName(MiniDFSCluster miniDFSCluster) {
+    if (miniDFSCluster != null) {
+      return String.format("hdfs://localhost:%d/",
+          miniDFSCluster.getNameNodePort());
+    } else {
+      return "file:///";
+    }
+  }
+
+  /**
+   * Assert that an operation failed because a cluster is in use.
+   *
+   * @param e exception
+   */
+  public static void assertFailureClusterInUse(SliderException e) {
+    assertExceptionDetails(e,
+        SliderExitCodes.EXIT_APPLICATION_IN_USE,
+        ErrorStrings.E_CLUSTER_RUNNING);
+  }
+
+  protected String buildClustername(String clustername) {
+    if (SliderUtils.isSet(clustername)) {
+      return clustername;
+    } else {
+      return createClusterName();
+    }
+  }
+
+  /**
+   * Create the cluster name from the method name and an auto-incrementing
+   * counter.
+   *
+   * @return a cluster name
+   */
+  protected String createClusterName() {
+    String base = methodName.getMethodName().toLowerCase(Locale.ENGLISH);
+    int count = clusterCount++;
+    if (count > 1) {
+      return String.format("%s-%d", base, count);
+    }
+    return base;
+  }
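+
+  // Example (hypothetical test method "testCreateCluster"): the first call
+  // returns "testcreatecluster"; later calls return "testcreatecluster-2",
+  // "testcreatecluster-3", and so on.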
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    Configuration testConf = getTestConfiguration();
+    thawWaitTime = getTimeOptionMillis(testConf,
+        KEY_TEST_THAW_WAIT_TIME,
+        thawWaitTime);
+    freezeWaitTime = getTimeOptionMillis(testConf,
+        KEY_TEST_FREEZE_WAIT_TIME,
+        freezeWaitTime);
+    sliderTestTimeout = getTimeOptionMillis(testConf,
+        KEY_TEST_TIMEOUT,
+        sliderTestTimeout);
+    teardownKillall =
+        testConf.getBoolean(KEY_TEST_TEARDOWN_KILLALL,
+            teardownKillall);
+
+  }
+
+  @After
+  public void teardown() {
+    describe("teardown");
+    stopRunningClusters();
+    stopMiniCluster();
+  }
+
+  protected void addToTeardown(SliderClient client) {
+    clustersToTeardown.add(client);
+  }
+
+  protected void addToTeardown(ServiceLauncher<SliderClient> launcher) {
+    if (launcher != null) {
+      SliderClient sliderClient = launcher.getService();
+      if (sliderClient != null) {
+        addToTeardown(sliderClient);
+      }
+    }
+  }
+
+  /**
+   * Kill any java process with the given grep pattern.
+   *
+   * @param grepString string to grep for
+   * @param signal signal to send
+   * @return the exit code of the kill command
+   */
+  public int killJavaProcesses(String grepString, int signal)
+      throws IOException, InterruptedException {
+
+    String[] commandString;
+    if (!Shell.WINDOWS) {
+      String killCommand = String.format(
+          "jps -l| grep %s | awk '{print $1}' | xargs kill %d", grepString,
+          signal);
+      LOG.info("Command command = {}", killCommand);
+
+      commandString = new String[]{"bash", "-c", killCommand};
+    } else {
+      // windows
+      if (!killSupported) {
+        return -1;
+      }
+
+      // "jps -l | grep "String" | awk "{print $1}" | xargs -n 1 taskkill /PID"
+      String killCommand = String.format(
+          "jps -l | grep %s | gawk '{print $1}' | xargs -n 1 taskkill /f " +
+              "/PID", grepString);
+      commandString = new String[]{"CMD", "/C", killCommand};
+    }
+
+    Process command = new ProcessBuilder(commandString).start();
+    int exitCode = command.waitFor();
+
+    logStdOutStdErr(command);
+    return exitCode;
+  }
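+
+  // For example, killJavaProcesses("SliderAppMaster", SIGKILL) runs
+  // roughly: jps -l | grep SliderAppMaster | awk '{print $1}' | xargs kill -9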
+
+  /**
+   * Kill all processes which match one of the list of grepstrings.
+   *
+   * @param greps grep patterns to match
+   * @param signal signal to send
+   */
+  public void killJavaProcesses(List<String> greps, int signal)
+      throws IOException, InterruptedException {
+    for (String grep : greps) {
+      killJavaProcesses(grep, signal);
+    }
+  }
+
+  protected YarnConfiguration getConfiguration() {
+    return SLIDER_CONFIG;
+  }
+
+  /**
+   * Stop any running cluster that has been added.
+   */
+  public void stopRunningClusters() {
+    for (SliderClient client : clustersToTeardown) {
+      try {
+        maybeStopCluster(client, "", "Teardown at end of test case", true);
+      } catch (Exception e) {
+        LOG.warn("While stopping cluster " + e, e);
+      }
+    }
+  }
+
+  public void stopMiniCluster() {
+    Log commonslog = LogFactory.getLog(YarnMiniClusterTestBase.class);
+    ServiceOperations.stopQuietly(commonslog, miniCluster);
+    if (hdfsCluster != null) {
+      hdfsCluster.shutdown();
+    }
+  }
+
+  /**
+   * Create and start a minicluster.
+   *
+   * @param name             cluster/test name; if empty one is created from
+   *                         the junit method
+   * @param conf             configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param numLocalDirs     #of local dirs
+   * @param numLogDirs       #of log dirs
+   * @param startHDFS        create an HDFS mini cluster
+   * @return the name of the cluster
+   */
+  protected String createMiniCluster(String name,
+      YarnConfiguration conf,
+      int noOfNodeManagers,
+      int numLocalDirs,
+      int numLogDirs,
+      boolean startHDFS) throws IOException {
+    assertNativeLibrariesPresent();
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
+    conf.set(YarnConfiguration.RM_SCHEDULER, FIFO_SCHEDULER);
+    patchDiskCapacityLimits(conf);
+    SliderUtils.patchConfiguration(conf);
+    name = buildClustername(name);
+    miniCluster = new MiniYARNCluster(
+        name,
+        noOfNodeManagers,
+        numLocalDirs,
+        numLogDirs);
+    miniCluster.init(conf);
+    miniCluster.start();
+    // health check
+    assertMiniClusterDisksHealthy(miniCluster);
+    if (startHDFS) {
+      createMiniHDFSCluster(name, conf);
+    }
+    return name;
+  }
+
+  public void patchDiskCapacityLimits(YarnConfiguration conf) {
+    conf.setFloat(
+        YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
+        99.0f);
+    conf.setInt(SliderXmlConfKeys.DFS_NAMENODE_DU_RESERVED_KEY,
+        2 * 1024 * 1024);
+    conf.setBoolean("yarn.nodemanager.disk-health-checker.enable", false);
+  }
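+
+  // The limits above are relaxed so that nearly-full disks on test
+  // machines don't get marked unhealthy and fail
+  // assertMiniClusterDisksHealthy().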
+
+  /**
+   * Create a mini HDFS cluster and save it to the hdfsClusterField.
+   *
+   * @param name cluster name
+   * @param conf configuration to use
+   */
+  public void createMiniHDFSCluster(String name, YarnConfiguration conf)
+      throws IOException {
+    hdfsCluster = buildMiniHDFSCluster(name, conf);
+  }
+
+  /**
+   * Launch the client with the specified args against the MiniMR cluster;
+   * the launch is expected to have completed successfully.
+   *
+   * @param conf configuration
+   * @param args arg list
+   * @return the service launcher
+   */
+  protected ServiceLauncher<SliderClient> launchClientAgainstMiniMR(
+      Configuration conf,
+      List args)
+      throws Throwable {
+    ServiceLauncher<SliderClient> launcher =
+        launchClientNoExitCodeCheck(conf, args);
+    int exited = launcher.getServiceExitCode();
+    if (exited != 0) {
+      throw new SliderException(exited, "Launch failed with exit code " +
+          exited);
+    }
+    return launcher;
+  }
+
+  /**
+   * Launch the client with the specific args against the MiniMR cluster
+   * without any checks for exit codes.
+   *
+   * @param conf configuration
+   * @param args arg list
+   * @return the service launcher
+   */
+  public ServiceLauncher<SliderClient> launchClientNoExitCodeCheck(
+      Configuration conf,
+      List args) throws Throwable {
+    assertNotNull(miniCluster);
+    return launchClientAgainstRM(getRMAddr(), args, conf);
+  }
+
+  /**
+   * Kill any running Slider AM processes.
+   *
+   * @param signal signal to send
+   * @return the exit code of the kill command
+   */
+  public int killAM(int signal) throws IOException, InterruptedException {
+    return killJavaProcesses(SliderAppMaster.SERVICE_CLASSNAME_SHORT, signal);
+  }
+
+  public void logStdOutStdErr(Process p) throws IOException {
+    try (BufferedReader br = new BufferedReader(new InputStreamReader(p
+        .getInputStream()))) {
+      String line = br.readLine();
+      while (line != null) {
+        LOG.info(line);
+        line = br.readLine();
+      }
+    }
+    try (BufferedReader br = new BufferedReader(new InputStreamReader(p
+        .getErrorStream()))) {
+      String line = br.readLine();
+      while (line != null) {
+        LOG.error(line);
+        line = br.readLine();
+      }
+    }
+  }
+
+  /**
+   * List running java processes.
+   */
+  public void lsJavaProcesses() throws InterruptedException, IOException {
+    Process bash = new ProcessBuilder("jps", "-v").start();
+    bash.waitFor();
+    logStdOutStdErr(bash);
+  }
+
+  public YarnConfiguration getTestConfiguration() {
+    YarnConfiguration conf = getConfiguration();
+    conf.addResource(SLIDER_TEST_XML);
+    return conf;
+  }
+
+  protected String getRMAddr() {
+    assertNotNull(miniCluster);
+    String addr = miniCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
+    assertNotNull(addr);
+    assertNotEquals("", addr);
+    return addr;
+  }
+
+  /**
+   * Return the default filesystem, which is HDFS if the miniDFS cluster is
+   * up, file:// if not.
+   *
+   * @return a filesystem string to pass down
+   */
+  protected String getFsDefaultName() {
+    return buildFsDefaultName(hdfsCluster);
+  }
+
+  /**
+   * Delete with some pauses and backoff; designed to handle slow delete
+   * operations on Windows.
+   *
+   * @param sliderFileSystem filesystem to work with
+   * @param path path to delete
+   * @param timeout timeout in millis
+   */
+  public void rigorousDelete(
+      SliderFileSystem sliderFileSystem,
+      Path path, long timeout) throws IOException, SliderException {
+
+    if (path.toUri().getScheme() == "file") {
+      File dir = new File(path.toUri().getPath());
+      rigorousDelete(dir, timeout);
+    } else {
+      Duration duration = new Duration(timeout);
+      duration.start();
+      FileSystem dfs = sliderFileSystem.getFileSystem();
+      boolean deleted = false;
+      while (!deleted && !duration.getLimitExceeded()) {
+        dfs.delete(path, true);
+        deleted = !dfs.exists(path);
+        if (!deleted) {
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException e) {
+            LOG.info("ignoring interrupted sleep");
+          }
+        }
+      }
+    }
+    sliderFileSystem.verifyDirectoryNonexistent(path);
+  }
+
+  /**
+   * Delete with some pauses and backoff; designed to handle slow delete
+   * operations on Windows.
+   *
+   * @param dir     dir to delete
+   * @param timeout timeout in millis
+   */
+  public void rigorousDelete(File dir, long timeout) throws IOException {
+    Duration duration = new Duration(timeout);
+    duration.start();
+    boolean deleted = false;
+    while (!deleted && !duration.getLimitExceeded()) {
+      FileUtils.deleteQuietly(dir);
+      deleted = !dir.exists();
+      if (!deleted) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          LOG.info("ignoring interrupted sleep");
+        }
+      }
+    }
+    if (!deleted) {
+      // noisy delete raises an IOE
+      FileUtils.deleteDirectory(dir);
+    }
+  }
+
+  /**
+   * Add arguments to launch Slider with.
+   * <p>
+   * Extra arguments are added after standard arguments and before roles.
+   *
+   * @return additional arguments to launch Slider with
+   */
+  protected List<String> getExtraCLIArgs() {
+    return new ArrayList<>();
+  }
+
+  public String getConfDir() throws FileNotFoundException {
+    return getResourceConfDirURI();
+  }
+
+  /**
+   * Get the configuration key for the application home directory.
+   *
+   * @return the application home key
+   */
+  public String getApplicationHomeKey() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Get the archive path -which defaults to the local one.
+   *
+   * @return the archive path
+   */
+  public String getArchivePath() {
+    return getLocalArchive();
+  }
+
+  /**
+   * Get the local archive -the one defined in the test configuration.
+   *
+   * @return a possibly null/empty string
+   */
+  public final String getLocalArchive() {
+    return getTestConfiguration().getTrimmed(getArchiveKey());
+  }
+
+  /**
+   * Get the key for archives in tests.
+   *
+   * @return the archive key
+   */
+  public String getArchiveKey() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Merge a k-v pair into a simple k=v string; simple utility.
+   *
+   * @param key key
+   * @param val value
+   * @return the string to use after a -D option
+   */
+  public String define(String key, String val) {
+    return String.format("%s=%s", key, val);
+  }
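+
+  // Usage sketch: the pair is passed after a -D option, e.g.
+  //   Arrays.asList("-D", define("fs.defaultFS", getFsDefaultName()))
+  // yields ["-D", "fs.defaultFS=file:///"] when no mini DFS cluster is up.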
+
+  public void assumeTestEnabled(boolean flag) {
+    assume(flag, "test disabled");
+  }
+
+  public void assumeArchiveDefined() {
+    String archive = getArchivePath();
+    boolean defined = SliderUtils.isSet(archive);
+    if (!defined) {
+      LOG.warn(NO_ARCHIVE_DEFINED + getArchiveKey());
+    }
+    assume(defined, NO_ARCHIVE_DEFINED + getArchiveKey());
+  }
+
+  /**
+   * Assume that application home is defined. This does not check that the
+   * path is valid -that is expected to be a failure on tests that require
+   * application home to be set.
+   */
+  public void assumeApplicationHome() {
+    String applicationHome = getApplicationHome();
+    assume(SliderUtils.isSet(applicationHome),
+        "Application home dir option not set " + getApplicationHomeKey());
+  }
+
+  public String getApplicationHome() {
+    return getTestConfiguration().getTrimmed(getApplicationHomeKey());
+  }
+
+  public List<String> getImageCommands() {
+    if (switchToImageDeploy) {
+      // it's an image that had better be defined
+      assertNotNull(getArchivePath());
+      if (!imageIsRemote) {
+        // it's not remote, so assert it exists
+        File f = new File(getArchivePath());
+        assertTrue(f.exists());
+        return Arrays.asList(Arguments.ARG_IMAGE, f.toURI().toString());
+      } else {
+        assertNotNull(remoteImageURI);
+
+        // if it is remote, then it's whatever the archivePath property refers to
+        return Arrays.asList(Arguments.ARG_IMAGE, remoteImageURI.toString());
+      }
+    } else {
+      assertNotNull(getApplicationHome());
+      assertTrue(new File(getApplicationHome()).exists());
+      return Arrays.asList(Arguments.ARG_APP_HOME, getApplicationHome());
+    }
+  }
+
+  /**
+   * Get the resource configuration dir in the source tree.
+   *
+   * @return the resource configuration directory
+   */
+  public File getResourceConfDir() throws FileNotFoundException {
+    File f = new File(getTestConfigurationPath()).getAbsoluteFile();
+    if (!f.exists()) {
+      throw new FileNotFoundException(
+          "Resource configuration directory " + f + " not found");
+    }
+    return f;
+  }
+
+  public String getTestConfigurationPath() {
+    failNotImplemented();
+    return null;
+  }
+
+  /**
+   * Get a URI string to the resource conf dir that is suitable for passing down
+   * to the AM -and works even when the default FS is hdfs.
+   */
+  public String getResourceConfDirURI() throws FileNotFoundException {
+    return getResourceConfDir().getAbsoluteFile().toURI().toString();
+  }
+
+  /**
+   * Log an application report.
+   *
+   * @param report report to log
+   */
+  public void logReport(ApplicationReport report) {
+    LOG.info(SliderUtils.reportToString(report));
+  }
+
+  /**
+   * Stop the cluster via the stop action -and wait for
+   * {@link #CLUSTER_STOP_TIME} for the cluster to stop. If it doesn't,
+   * the nonzero exit code is logged as a warning.
+   *
+   * @param sliderClient client
+   * @param clustername  cluster
+   * @param message      message to include in the stop request
+   * @param force        force the stop
+   * @return the exit code
+   */
+  public int clusterActionFreeze(SliderClient sliderClient, String clustername,
+      String message, boolean force)
+      throws IOException, YarnException {
+    LOG.info("Stopping cluster {}: {}", clustername, message);
+    ActionFreezeArgs freezeArgs = new ActionFreezeArgs();
+    freezeArgs.setWaittime(CLUSTER_STOP_TIME);
+    freezeArgs.message = message;
+    freezeArgs.force = force;
+    int exitCode = sliderClient.actionStop(clustername,
+        freezeArgs);
+    if (exitCode != 0) {
+      LOG.warn("Cluster stop failed with error code {}", exitCode);
+    }
+    return exitCode;
+  }
+
+  /**
+   * Teardown-time cluster termination; will stop the cluster iff the client
+   * is not null.
+   *
+   * @param sliderClient client
+   * @param clustername  name of cluster to teardown
+   * @param message      message to include in the stop request
+   * @param force        force the stop
+   * @return the exit code of the stop, or 0 if no stop was attempted
+   */
+  public int maybeStopCluster(
+      SliderClient sliderClient,
+      String clustername,
+      String message,
+      boolean force) throws IOException, YarnException {
+    if (sliderClient != null) {
+      if (SliderUtils.isUnset(clustername)) {
+        clustername = sliderClient.getDeployedClusterName();
+      }
+      //only stop a cluster that exists
+      if (SliderUtils.isSet(clustername)) {
+        return clusterActionFreeze(sliderClient, clustername, message, force);
+      }
+    }
+    return 0;
+  }
+
+  public String roleMapToString(Map<String, Integer> roles) {
+    StringBuilder builder = new StringBuilder();
+    for (Entry<String, Integer> entry : roles.entrySet()) {
+      builder.append(entry.getKey());
+      builder.append("->");
+      builder.append(entry.getValue());
+      builder.append(" ");
+    }
+    return builder.toString();
+  }
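+
+  // For example, a map {"master": 1, "worker": 5} renders as
+  // "master->1 worker->5 " (iteration order depends on the map type).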
+
+  /**
+   * Turn on test runs against a copy of the archive that is
+   * uploaded to HDFS -this method copies up the
+   * archive then switches the tests into archive mode.
+   */
+  public void enableTestRunAgainstUploadedArchive() throws IOException {
+    Path remotePath = copyLocalArchiveToHDFS(getLocalArchive());
+    // image mode
+    switchToRemoteImageDeploy(remotePath);
+  }
+
+  /**
+   * Switch to deploying a remote image.
+   *
+   * @param remotePath the remote path to use
+   */
+  public void switchToRemoteImageDeploy(Path remotePath) {
+    switchToImageDeploy = true;
+    imageIsRemote = true;
+    remoteImageURI = remotePath.toUri();
+  }
+
+  /**
+   * Copy a local archive to HDFS.
+   *
+   * @param localArchive local archive
+   * @return the path of the uploaded image
+   */
+  public Path copyLocalArchiveToHDFS(String localArchive) throws IOException {
+    assertNotNull(localArchive);
+    File localArchiveFile = new File(localArchive);
+    assertTrue(localArchiveFile.exists());
+    assertNotNull(hdfsCluster);
+    Path remoteUnresolvedArchive = new Path(localArchiveFile.getName());
+    assertTrue(FileUtil.copy(
+        localArchiveFile,
+        hdfsCluster.getFileSystem(),
+        remoteUnresolvedArchive,
+        false,
+        getTestConfiguration()));
+    Path remotePath = hdfsCluster.getFileSystem().resolvePath(
+        remoteUnresolvedArchive);
+    return remotePath;
+  }
+
+  /**
+   * Create a SliderFileSystem instance bonded to the running FS.
+   * The YARN cluster must be up and running already.
+   *
+   * @return a new SliderFileSystem
+   */
+  public SliderFileSystem createSliderFileSystem()
+      throws URISyntaxException, IOException {
+    FileSystem dfs =
+        FileSystem.get(new URI(getFsDefaultName()), getConfiguration());
+    SliderFileSystem hfs = new SliderFileSystem(dfs, getConfiguration());
+    return hfs;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
new file mode 100644
index 0000000..322b346
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/utils/YarnZKMiniClusterTestBase.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.zk.BlockingZKWatcher;
+import org.apache.slider.core.zk.ZKIntegration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.slider.utils.KeysForTests.USERNAME;
+
+/**
+ * Base class for mini cluster tests that use Zookeeper.
+ */
+public abstract class YarnZKMiniClusterTestBase extends
+    YarnMiniClusterTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(YarnZKMiniClusterTestBase.class);
+
+  private MicroZKCluster microZKCluster;
+
+  @Override
+  public void stopMiniCluster() {
+    super.stopMiniCluster();
+    IOUtils.closeStream(microZKCluster);
+  }
+
+  public ZKIntegration createZKIntegrationInstance(String zkQuorum,
+      String clusterName,
+      boolean createClusterPath,
+      boolean canBeReadOnly,
+      int timeout) throws IOException, InterruptedException {
+    int sessionTimeout = ZKIntegration.SESSION_TIMEOUT;
+
+    BlockingZKWatcher watcher = new BlockingZKWatcher();
+    ZKIntegration zki = ZKIntegration.newInstance(zkQuorum,
+        USERNAME,
+        clusterName,
+        createClusterPath,
+        canBeReadOnly,
+        watcher,
+        sessionTimeout);
+    boolean fromCache = zki.init();
+    //here the callback may or may not have occurred.
+    //optionally wait for it
+    if (timeout > 0 && !fromCache) {
+      watcher.waitForZKConnection(timeout);
+    }
+    //if we get here, the binding worked
+    LOG.info("Connected: {}", zki);
+    return zki;
+  }
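+
+  // Usage sketch (hypothetical cluster name): bind to the test quorum and
+  // wait up to five seconds for the ZK connection event:
+  //   ZKIntegration zki = createZKIntegrationInstance(
+  //       getZKBinding(), "test-cluster", true, false, 5000);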
+
+  /**
+   * Wait for a flag to go true.
+   * @param connectedFlag flag to wait on
+   * @param timeout wait timeout in millis
+   */
+  public void waitForZKConnection(AtomicBoolean connectedFlag, int timeout)
+      throws InterruptedException {
+    synchronized (connectedFlag) {
+      if (!connectedFlag.get()) {
+        LOG.info("waiting for ZK event");
+        //wait a bit
+        connectedFlag.wait(timeout);
+      }
+    }
+    assertTrue(connectedFlag.get());
+  }
+
+  /**
+   * Create and start a minicluster with ZK.
+   * @param name cluster/test name
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param numLocalDirs #of local dirs
+   * @param numLogDirs #of log dirs
+   * @param startZK create a ZK micro cluster *THIS IS IGNORED*
+   * @param startHDFS create an HDFS mini cluster
+   * @return the cluster name
+   */
+  protected String createMiniCluster(String name,
+                                   YarnConfiguration conf,
+                                   int noOfNodeManagers,
+                                   int numLocalDirs,
+                                   int numLogDirs,
+                                   boolean startZK,
+                                   boolean startHDFS) throws IOException {
+    if (SliderUtils.isUnset(name)) {
+      name = methodName.getMethodName();
+    }
+    createMicroZKCluster("-" + name, conf);
+    conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true);
+    conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding());
+    //now create the cluster
+    name = super.createMiniCluster(name, conf, noOfNodeManagers,
+        numLocalDirs, numLogDirs, startHDFS);
+
+    return name;
+  }
+
+  /**
+   * Create and start a minicluster.
+   * @param name cluster/test name
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param startZK create a ZK micro cluster
+   * @return the cluster name
+   */
+  protected String createMiniCluster(String name,
+                                   YarnConfiguration conf,
+                                   int noOfNodeManagers,
+                                   boolean startZK) throws IOException {
+    return createMiniCluster(name, conf, noOfNodeManagers, 1, 1, startZK,
+        false);
+  }
+
+  /**
+   * Create and start a minicluster with the name from the test method.
+   * @param conf configuration to use
+   * @param noOfNodeManagers #of NMs
+   * @param startZK create a ZK micro cluster
+   * @return the cluster name
+   */
+  protected String createMiniCluster(YarnConfiguration conf,
+      int noOfNodeManagers,
+      boolean startZK) throws IOException {
+    return createMiniCluster("", conf, noOfNodeManagers, 1, 1, startZK,
+        false);
+  }
+
+  public void createMicroZKCluster(String name, Configuration conf) {
+    microZKCluster = new MicroZKCluster(new Configuration(conf));
+    microZKCluster.createCluster(name);
+  }
+
+  public void assertHasZKCluster() {
+    assertNotNull(microZKCluster);
+  }
+
+  public String getZKBinding() {
+    if (microZKCluster == null) {
+      return "localhost:1";
+    } else {
+      return microZKCluster.getZkBindingString();
+    }
+  }
+
+  /**
+   * CLI args include all the ZK bindings needed.
+   * @return extra CLI args defining the ZK quorum binding
+   */
+  @Override
+  protected List<String> getExtraCLIArgs() {
+    return Arrays.asList(
+      "-D", define(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding())
+    );
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
new file mode 100644
index 0000000..3adbaa4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess
+
+log4j.logger.org.apache.slider=DEBUG
+log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+log4j.logger.org.apache.hadoop.yarn.registry=DEBUG
+
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.ipc.CallQueueManager=WARN
+
+log4j.logger.org.apache.hadoop.util.Shell=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager=FATAL
+log4j.logger.org.apache.hadoop.security.authentication.server.AuthenticationFilter=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+log4j.logger.org.apache.hadoop.hdfs=WARN
+log4j.logger.BlockStateChange=WARN
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL
+
+log4j.logger.org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitorImpl=ERROR
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher=WARN
+log4j.logger.org.apache.hadoop.metrics2=ERROR
+log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN
+log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN
+log4j.logger.org.apache.hadoop.security.token.delegation=WARN
+log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
deleted file mode 100644
index a1d7780..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
deleted file mode 100644
index cb8eab2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/metainfo.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <application>
-    <name>STORM</name>
-    <comment>Apache Hadoop Stream processing framework</comment>
-    <version>0.9.1.2.1</version>
-    <components>
-
-      <component>
-        <name>NIMBUS</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/nimbus.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_REST_API</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/rest_api.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>SUPERVISOR</name>
-        <category>SLAVE</category>
-        <commandScript>
-          <script>scripts/supervisor.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_UI_SERVER</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/ui_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>DRPC_SERVER</name>
-        <category>MASTER</category>
-        <commandScript>
-          <script>scripts/drpc_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-    </components>
-
-    <osSpecifics>
-      <osSpecific>
-        <osType>any</osType>
-        <packages>
-          <package>
-            <type>tarball</type>
-            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-          </package>
-        </packages>
-      </osSpecific>
-    </osSpecifics>
-
-    <configFiles>
-      <configFile>
-        <type>xml</type>
-        <fileName>storm-site.xml</fileName>
-        <dictionaryName>storm-site</dictionaryName>
-      </configFile>
-    </configFiles>
-  </application>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
deleted file mode 100644
index a1d7780..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
deleted file mode 100644
index f86e687..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/common/tools/test/someOtherFile.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo></metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
new file mode 100644
index 0000000..e2a21ea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override-resolved.json
@@ -0,0 +1,49 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple",
+      "configuration": {
+        "properties": {
+          "g1": "a",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "master",
+      "configuration": {
+        "properties": {
+          "g1": "overridden",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "g2": "b",
+          "timeout": "1000"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
new file mode 100644
index 0000000..552cdef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-override.json
@@ -0,0 +1,43 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple"
+    },
+    {
+      "name": "master",
+      "configuration": {
+        "properties": {
+          "name": "m",
+          "g1": "overridden"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "name": "worker",
+          "g1": "overridden-by-worker",
+          "timeout": "1000"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
new file mode 100644
index 0000000..cd1ab6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app-resolved.json
@@ -0,0 +1,81 @@
+{
+  "name": "zk-app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "internal.chaos.monkey.interval.seconds": "60",
+      "zookeeper.port": "2181",
+      "zookeeper.path": "/yarnapps_small_cluster",
+      "zookeeper.hosts": "zoo1,zoo2,zoo3",
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.master.startup.retainassign": "true",
+      "site.fs.defaultFS": "hdfs://cluster:8020",
+      "site.fs.default.name": "hdfs://cluster:8020",
+      "site.hbase.master.info.port": "0",
+      "site.hbase.regionserver.info.port": "0"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple",
+      "number_of_containers": 2,
+      "configuration": {
+        "properties": {
+          "g1": "a",
+          "g2": "b"
+        }
+      }
+    },
+    {
+      "name": "master",
+      "number_of_containers": 1,
+      "resource": {
+        "cpus": 1,
+        "memory": "512"
+      },
+      "configuration": {
+        "properties": {
+          "zookeeper.port": "2181",
+          "zookeeper.path": "/yarnapps_small_cluster",
+          "zookeeper.hosts": "zoo1,zoo2,zoo3",
+          "env.MALLOC_ARENA_MAX": "4",
+          "site.hbase.master.startup.retainassign": "true",
+          "site.fs.defaultFS": "hdfs://cluster:8020",
+          "site.fs.default.name": "hdfs://cluster:8020",
+          "site.hbase.master.info.port": "0",
+          "site.hbase.regionserver.info.port": "0",
+          "jvm.heapsize": "512M"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "number_of_containers": 5,
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "g2": "b",
+          "zookeeper.port": "2181",
+          "zookeeper.path": "/yarnapps_small_cluster",
+          "zookeeper.hosts": "zoo1,zoo2,zoo3",
+          "env.MALLOC_ARENA_MAX": "4",
+          "site.hbase.master.startup.retainassign": "true",
+          "site.fs.defaultFS": "hdfs://cluster:8020",
+          "site.fs.default.name": "hdfs://cluster:8020",
+          "site.hbase.master.info.port": "0",
+          "site.hbase.regionserver.info.port": "0",
+          "jvm.heapsize": "512M"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
new file mode 100644
index 0000000..90857db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app.json
@@ -0,0 +1,54 @@
+{
+  "name": "app-1",
+  "lifetime": "3600",
+  "configuration": {
+    "properties": {
+      "g1": "a",
+      "g2": "b",
+      "internal.chaos.monkey.interval.seconds": "60",
+      "zookeeper.port": "2181",
+      "zookeeper.path": "/yarnapps_small_cluster",
+      "zookeeper.hosts": "zoo1,zoo2,zoo3",
+      "env.MALLOC_ARENA_MAX": "4",
+      "site.hbase.master.startup.retainassign": "true",
+      "site.fs.defaultFS": "hdfs://cluster:8020",
+      "site.fs.default.name": "hdfs://cluster:8020",
+      "site.hbase.master.info.port": "0",
+      "site.hbase.regionserver.info.port": "0"
+    }
+  },
+  "resource": {
+    "cpus": 1,
+    "memory": "512"
+  },
+  "number_of_containers": 2,
+  "components": [
+    {
+      "name": "simple"
+    },
+    {
+      "name": "master",
+      "number_of_containers": 1,
+      "configuration": {
+        "properties": {
+          "g1": "overridden",
+          "jvm.heapsize": "512M"
+        }
+      }
+    },
+    {
+      "name": "worker",
+      "number_of_containers": 5,
+      "resource": {
+        "cpus": 1,
+        "memory": "1024"
+      },
+      "configuration": {
+        "properties": {
+          "g1": "overridden-by-worker",
+          "jvm.heapsize": "512M"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
deleted file mode 100644
index fbe9299..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/providers/agent/application/metadata/metainfo.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <application>
-    <name>STORM</name>
-    <comment>Apache Hadoop Stream processing framework</comment>
-    <version>0.9.1.2.1</version>
-    <exportedConfigs>storm-site</exportedConfigs>
-
-    <exportGroups>
-      <exportGroup>
-        <name>QuickLinks</name>
-        <exports>
-          <export>
-            <name>app.jmx</name>
-            <value>http://${STORM_REST_API_HOST}:${site.global.rest_api_port}/api/cluster/summary</value>
-          </export>
-          <export>
-            <name>app.monitor</name>
-            <value>http://${STORM_UI_SERVER_HOST}:${site.storm-site.ui.port}</value>
-          </export>
-          <export>
-            <name>app.metrics</name>
-            <value>http://${site.global.ganglia_server_host}/cgi-bin/rrd.py?c=${site.global.ganglia_server_id}</value>
-          </export>
-          <export>
-            <name>ganglia.ui</name>
-            <value>http://${site.global.ganglia_server_host}/ganglia?c=${site.global.ganglia_server_id}</value>
-          </export>
-          <export>
-            <name>nimbus.url</name>
-            <value>http://${NIMBUS_HOST}:${site.storm-site.nimbus.thrift.port}</value>
-          </export>
-        </exports>
-      </exportGroup>
-    </exportGroups>
-
-    <commandOrders>
-      <commandOrder>
-        <command>NIMBUS-START</command>
-        <requires>SUPERVISOR-INSTALLED,STORM_UI_SERVER-INSTALLED,DRPC_SERVER-INSTALLED,STORM_REST_API-INSTALLED
-        </requires>
-      </commandOrder>
-      <commandOrder>
-        <command>SUPERVISOR-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>DRPC_SERVER-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>STORM_REST_API-START</command>
-        <requires>NIMBUS-STARTED,DRPC_SERVER-STARTED,STORM_UI_SERVER-STARTED</requires>
-      </commandOrder>
-      <commandOrder>
-        <command>STORM_UI_SERVER-START</command>
-        <requires>NIMBUS-STARTED</requires>
-      </commandOrder>
-    </commandOrders>
-
-    <components>
-
-      <component>
-        <name>NIMBUS</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <appExports>QuickLinks-nimbus.url,QuickLinks-ganglia.ui,QuickLinks-app.metrics</appExports>
-        <commandScript>
-          <script>scripts/nimbus.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_REST_API</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <appExports>QuickLinks-app.jmx</appExports>
-        <commandScript>
-          <script>scripts/rest_api.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>SUPERVISOR</name>
-        <category>SLAVE</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <componentExports>
-          <componentExport>
-            <name>log_viewer_port</name>
-            <value>${THIS_HOST}:${site.storm-site.logviewer.port}</value>
-          </componentExport>
-        </componentExports>
-        <commandScript>
-          <script>scripts/supervisor.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>STORM_UI_SERVER</name>
-        <category>MASTER</category>
-        <publishConfig>true</publishConfig>
-        <appExports>QuickLinks-app.monitor</appExports>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <commandScript>
-          <script>scripts/ui_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>DRPC_SERVER</name>
-        <category>MASTER</category>
-        <autoStartOnFailure>true</autoStartOnFailure>
-        <commandScript>
-          <script>scripts/drpc_server.py</script>
-          <scriptType>PYTHON</scriptType>
-          <timeout>600</timeout>
-        </commandScript>
-      </component>
-
-      <component>
-        <name>ANOTHER_COMPONENT</name>
-        <category>MASTER</category>
-        <commands>
-          <command>
-            <exec>start command</exec>
-          </command>
-          <command>
-            <exec>stop command</exec>
-            <name>STOP</name>
-          </command>
-        </commands>
-      </component>
-    </components>
-
-    <osSpecifics>
-      <osSpecific>
-        <osType>any</osType>
-        <packages>
-          <package>
-            <type>tarball</type>
-            <name>files/apache-storm-0.9.1.2.1.1.0-237.tar.gz</name>
-          </package>
-        </packages>
-      </osSpecific>
-    </osSpecifics>
-
-    <packages>
-      <package>
-        <type>tarball</type>
-        <name>test-tarball-name.tgz</name>
-      </package>
-    </packages>
-  </application>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
new file mode 100644
index 0000000..bc6429c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json
@@ -0,0 +1,9 @@
+{
+  "nodes": ["/users/example/services/org-apache-slider/test-registry-rest-resources/components"], "service": {
+  "description": "Slider Application Master",
+  "yarn:id": "application_1411664296263_0001",
+  "yarn:persistence": 1,
+  "external": [],
+  "internal": []
+}
+}
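[Note on the fixture above: this is the payload shape for a registry path -- a "nodes" array of child entries plus the "service" record for the Slider Application Master. Keys such as "yarn:id" are not valid Java identifiers, so a consumer reads them by name rather than binding to a bean. A hedged sketch; the class name and local-file access in place of the REST endpoint are assumptions.]

import java.io.File;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RegistrySampleCheck {
  public static void main(String[] args) throws Exception {
    JsonNode root = new ObjectMapper().readTree(new File(
        "src/test/resources/org/apache/slider/server/appmaster/"
            + "web/rest/registry/sample.json"));

    // Child nodes published under this registry path.
    for (JsonNode node : root.path("nodes")) {
      System.out.println("child: " + node.asText());
    }

    // Service record: read the colon-qualified keys by name.
    JsonNode service = root.path("service");
    System.out.println("yarn:id = " + service.path("yarn:id").asText());
    System.out.println("yarn:persistence = "
        + service.path("yarn:persistence").asInt());
  }
}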

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
new file mode 100644
index 0000000..ceab0a5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json
@@ -0,0 +1,6 @@
+{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":3}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":2,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":4}}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
new file mode 100644
index 0000000..f1c53d5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json
@@ -0,0 +1,8 @@
+{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":6}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":4,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":5,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":6,"active":true,"last_used":0}}}
+{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":6}}}
\ No newline at end of file
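[Note on the two history-v01 fixtures above: they are newline-delimited JSON -- a RoleHistoryHeader line, one NodeEntryRecord per line, and a RoleHistoryFooter whose count matches the node entries written (4 in the 3-role file, 6 in the 6-role file). A sketch of scanning one file and cross-checking the footer, with Jackson standing in for whatever decoder the real loader uses; the path and class name are illustrative.]

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RoleHistoryFixtureCheck {
  private static final String PREFIX = "org.apache.slider.server.avro.";

  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    int nodeEntries = 0;
    long footerCount = -1;
    // One self-describing record per line.
    for (String line : Files.readAllLines(Paths.get(
        "src/test/resources/org/apache/slider/server/avro/"
            + "history-v01-3-role.json"))) {
      JsonNode entry = mapper.readTree(line).path("entry");
      if (entry.has(PREFIX + "NodeEntryRecord")) {
        nodeEntries++;
      } else if (entry.has(PREFIX + "RoleHistoryFooter")) {
        footerCount = entry.path(PREFIX + "RoleHistoryFooter")
            .path("count").asLong();
      }
    }
    // Expect 4 entries and footer count 4 for the 3-role fixture.
    System.out.println(nodeEntries + " entries, footer count "
        + footerCount);
  }
}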

http://git-wip-us.apache.org/repos/asf/hadoop/blob/256a1597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
new file mode 100644
index 0000000..67d644f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json
@@ -0,0 +1,38 @@
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryHeader": {
+      "version": 1,
+      "saved": 1450435691617,
+      "savedx": "151b4b44461",
+      "savedate": "18 Dec 2015 10:48:11 GMT",
+      "roles": 2
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryMapping": {
+      "rolemap": {
+        "echo": 1,
+        "slider-appmaster": 0
+      }
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.NodeEntryRecord": {
+      "host": "192.168.56.1",
+      "role": 1,
+      "active": true,
+      "last_used": 0
+    }
+  }
+}
+{
+  "entry": {
+    "org.apache.slider.server.avro.RoleHistoryFooter": {
+      "count": 1
+    }
+  }
+}
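[Note on the fixture above: unlike the v01 files, this v01b fixture pretty-prints its records as concatenated JSON documents and adds a RoleHistoryMapping tying component names ("echo", "slider-appmaster") to the numeric role ids used in the NodeEntryRecords. Jackson's MappingIterator can walk such a concatenated stream, as in this hedged sketch; the path and class name are assumptions.]

import java.io.File;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RoleHistoryV01bDump {
  public static void main(String[] args) throws Exception {
    MappingIterator<JsonNode> records = new ObjectMapper()
        .readerFor(JsonNode.class)
        .readValues(new File(
            "src/test/resources/org/apache/slider/server/avro/"
                + "history_v01b_1_role.json"));
    while (records.hasNext()) {
      // Each document wraps one typed entry; print its record type.
      System.out.println(
          records.next().path("entry").fieldNames().next());
    }
  }
}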

