Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/13 21:19:14 UTC

[01/25] hadoop git commit: MAPREDUCE-6741. Refactor UncompressedSplitLineReader.fillBuffer(). Contributed by Daniel Templeton.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 9d3bb1544 -> 982bee0a1


MAPREDUCE-6741. Refactor UncompressedSplitLineReader.fillBuffer(). Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b7b8a37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b7b8a37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b7b8a37

Branch: refs/heads/HDFS-1312
Commit: 0b7b8a377611b2a3041a2995504a437c36dfa6e6
Parents: 9581fb7
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jun 10 19:15:36 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jun 10 19:15:36 2016 +0900

----------------------------------------------------------------------
 .../mapreduce/lib/input/UncompressedSplitLineReader.java     | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7b8a37/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
index bda0218..c2b005b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
@@ -53,10 +53,10 @@ public class UncompressedSplitLineReader extends SplitLineReader {
       throws IOException {
     int maxBytesToRead = buffer.length;
     if (totalBytesRead < splitLength) {
-      long leftBytesForSplit = splitLength - totalBytesRead;
-      // check if leftBytesForSplit exceed Integer.MAX_VALUE
-      if (leftBytesForSplit <= Integer.MAX_VALUE) {
-        maxBytesToRead = Math.min(maxBytesToRead, (int)leftBytesForSplit);
+      long bytesLeftInSplit = splitLength - totalBytesRead;
+
+      if (bytesLeftInSplit < maxBytesToRead) {
+        maxBytesToRead = (int)bytesLeftInSplit;
       }
     }
     int bytesRead = in.read(buffer, 0, maxBytesToRead);

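For readers skimming the change: the simplification relies on maxBytesToRead starting out as buffer.length, which is an int, so any bytesLeftInSplit large enough to overflow an int can never be smaller than it, and the old Integer.MAX_VALUE guard becomes redundant. A standalone sketch of just the clamping logic in the hunk above (simplified; not the full fillBuffer()):

    // Sketch: clamp the read size to what is left in the split.
    private int clampToSplit(byte[] buffer, long splitLength, long totalBytesRead) {
      int maxBytesToRead = buffer.length;
      if (totalBytesRead < splitLength) {
        long bytesLeftInSplit = splitLength - totalBytesRead;
        if (bytesLeftInSplit < maxBytesToRead) {
          // The comparison above guarantees the value fits in an int,
          // which is why the explicit Integer.MAX_VALUE check was dropped.
          maxBytesToRead = (int) bytesLeftInSplit;
        }
      }
      return maxBytesToRead;
    }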

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/25] hadoop git commit: YARN-4989. TestWorkPreservingRMRestart#testCapacitySchedulerRecovery fails intermittently. Contributed by Ajith S.

Posted by ae...@apache.org.
YARN-4989. TestWorkPreservingRMRestart#testCapacitySchedulerRecovery fails intermittently. Contributed by Ajith S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28b66ae9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28b66ae9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28b66ae9

Branch: refs/heads/HDFS-1312
Commit: 28b66ae919e348123f4c05a4787c9ec56c087c25
Parents: 49b4064
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Mon Jun 13 11:09:32 2016 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Mon Jun 13 11:09:32 2016 +0530

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/TestWorkPreservingRMRestart.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28b66ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 7e5915b..3a60e02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -666,7 +666,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     // Wait for RM to settle down on recovering containers;
     waitForNumContainersToRecover(2, rm2, am1_1.getApplicationAttemptId());
     waitForNumContainersToRecover(2, rm2, am1_2.getApplicationAttemptId());
-    waitForNumContainersToRecover(2, rm2, am1_2.getApplicationAttemptId());
+    waitForNumContainersToRecover(2, rm2, am2.getApplicationAttemptId());
 
     // Calculate each queue's resource usage.
     Resource containerResource = Resource.newInstance(1024, 1);

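The corrected call waits on the second application's attempt (am2) instead of waiting on am1_2 a second time, so recovery of app2's containers is actually verified before the queue-usage checks below. For readers unfamiliar with the helper, a generic polling wait of this kind usually looks something like the sketch below (purely illustrative; the real waitForNumContainersToRecover in this test class has its own signature and implementation):

    // Hypothetical polling helper: block until at least `expected` containers
    // are reported as recovered, or fail after a timeout.
    static void waitForRecovered(int expected,
        java.util.function.IntSupplier recoveredCount, long timeoutMs)
        throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (recoveredCount.getAsInt() < expected) {
        if (System.currentTimeMillis() > deadline) {
          throw new AssertionError(
              "Timed out waiting for " + expected + " recovered containers");
        }
        Thread.sleep(100); // poll periodically instead of busy-waiting
      }
    }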



[23/25] hadoop git commit: HADOOP-13079. Add -q option to Ls to print ? instead of non-printable characters. Contributed by John Zhuge.

Posted by ae...@apache.org.
HADOOP-13079. Add -q option to Ls to print ? instead of non-printable characters. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0accc330
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0accc330
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0accc330

Branch: refs/heads/HDFS-1312
Commit: 0accc3306d830c3f2b16c4b8abf68729c7aba6cb
Parents: 28b66ae
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jun 13 11:43:46 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jun 13 11:43:46 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/shell/Ls.java     | 28 +++++--
 .../apache/hadoop/fs/shell/PrintableString.java | 72 ++++++++++++++++
 .../src/site/markdown/FileSystemShell.md        |  3 +-
 .../org/apache/hadoop/fs/TestFsShellList.java   | 78 ++++++++++++++++++
 .../hadoop/fs/shell/TestPrintableString.java    | 87 ++++++++++++++++++++
 .../apache/hadoop/fs/shell/package-info.java    | 26 ++++++
 .../src/test/resources/testConf.xml             |  6 +-
 7 files changed, 291 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 67348c6..47e87f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -48,6 +48,7 @@ class Ls extends FsCommand {
   private static final String OPTION_PATHONLY = "C";
   private static final String OPTION_DIRECTORY = "d";
   private static final String OPTION_HUMAN = "h";
+  private static final String OPTION_HIDENONPRINTABLE = "q";
   private static final String OPTION_RECURSIVE = "R";
   private static final String OPTION_REVERSE = "r";
   private static final String OPTION_MTIME = "t";
@@ -55,10 +56,11 @@ class Ls extends FsCommand {
   private static final String OPTION_SIZE = "S";
 
   public static final String NAME = "ls";
-  public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-"
-      + OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" + OPTION_RECURSIVE
-      + "] [-" + OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE
-      + "] [-" + OPTION_ATIME + "] [<path> ...]";
+  public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" +
+      OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" +
+      OPTION_HIDENONPRINTABLE + "] [-" + OPTION_RECURSIVE + "] [-" +
+      OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" +
+      OPTION_ATIME + "] [<path> ...]";
 
   public static final String DESCRIPTION =
       "List the contents that match the specified file pattern. If " +
@@ -77,6 +79,8 @@ class Ls extends FsCommand {
           "  -" + OPTION_HUMAN +
           "  Formats the sizes of files in a human-readable fashion\n" +
           "      rather than a number of bytes.\n" +
+          "  -" + OPTION_HIDENONPRINTABLE +
+          "  Print ? instead of non-printable characters.\n" +
           "  -" + OPTION_RECURSIVE +
           "  Recursively list the contents of directories.\n" +
           "  -" + OPTION_MTIME +
@@ -104,6 +108,9 @@ class Ls extends FsCommand {
 
   protected boolean humanReadable = false;
 
+  /** Whether to print ? instead of non-printable characters. */
+  private boolean hideNonPrintable = false;
+
   protected Ls() {}
 
   protected Ls(Configuration conf) {
@@ -119,14 +126,16 @@ class Ls extends FsCommand {
   @Override
   protected void processOptions(LinkedList<String> args)
   throws IOException {
-    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY,
-        OPTION_DIRECTORY, OPTION_HUMAN, OPTION_RECURSIVE, OPTION_REVERSE,
+    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
+        OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
+        OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
         OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
     cf.parse(args);
     pathOnly = cf.getOpt(OPTION_PATHONLY);
     dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
     setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
     humanReadable = cf.getOpt(OPTION_HUMAN);
+    hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
     orderReverse = cf.getOpt(OPTION_REVERSE);
     orderTime = cf.getOpt(OPTION_MTIME);
     orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
@@ -163,6 +172,11 @@ class Ls extends FsCommand {
     return this.humanReadable;
   }
 
+  @InterfaceAudience.Private
+  private boolean isHideNonPrintable() {
+    return hideNonPrintable;
+  }
+
   /**
    * Should directory contents be displayed in reverse order
    * @return true reverse order, false default order
@@ -241,7 +255,7 @@ class Ls extends FsCommand {
         dateFormat.format(new Date(isUseAtime()
             ? stat.getAccessTime()
             : stat.getModificationTime())),
-        item);
+        isHideNonPrintable() ? new PrintableString(item.toString()) : item);
     out.println(line);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PrintableString.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PrintableString.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PrintableString.java
new file mode 100644
index 0000000..df68f40
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PrintableString.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The {@code PrintableString} class converts any string to a printable string
+ * by replacing non-printable characters with ?.
+ *
+ * Categories of Unicode non-printable characters:
+ * <ul>
+ * <li> Control characters   (Cc)
+ * <li> Formatting Unicode   (Cf)
+ * <li> Private use Unicode  (Co)
+ * <li> Unassigned Unicode   (Cn)
+ * <li> Standalone surrogate (Unfortunately no matching Unicode category)
+ * </ul>
+ *
+ * @see Character
+ * @see <a href="http://www.unicode.org/">The Unicode Consortium</a>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+class PrintableString {
+  private static final char REPLACEMENT_CHAR = '?';
+
+  private final String printableString;
+
+  PrintableString(String rawString) {
+    StringBuilder stringBuilder = new StringBuilder(rawString.length());
+    for (int offset = 0; offset < rawString.length();) {
+      int codePoint = rawString.codePointAt(offset);
+      offset += Character.charCount(codePoint);
+
+      switch (Character.getType(codePoint)) {
+      case Character.CONTROL:     // Cc
+      case Character.FORMAT:      // Cf
+      case Character.PRIVATE_USE: // Co
+      case Character.SURROGATE:   // Cs
+      case Character.UNASSIGNED:  // Cn
+        stringBuilder.append(REPLACEMENT_CHAR);
+        break;
+      default:
+        stringBuilder.append(Character.toChars(codePoint));
+        break;
+      }
+    }
+    printableString = stringBuilder.toString();
+  }
+
+  public String toString() {
+    return printableString;
+  }
+}

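A quick illustration of the new class on a file name containing control characters (hypothetical snippet; since PrintableString is package-private it would have to live in org.apache.hadoop.fs.shell, as the tests added below do):

    package org.apache.hadoop.fs.shell;

    public class PrintableStringDemo {
      public static void main(String[] args) {
        // Control characters (category Cc) such as \b and \t are replaced
        // with '?'; ordinary printable characters pass through unchanged.
        String raw = "abc\bd\tef";
        System.out.println(new PrintableString(raw)); // prints: abc?d?ef
      }
    }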
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 5790bb7..1723426 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -384,13 +384,14 @@ Return usage output.
 ls
 ----
 
-Usage: `hadoop fs -ls [-C] [-d] [-h] [-R] [-t] [-S] [-r] [-u] <args> `
+Usage: `hadoop fs -ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] <args> `
 
 Options:
 
 * -C: Display the paths of files and directories only.
 * -d: Directories are listed as plain files.
 * -h: Format file sizes in a human-readable fashion (eg 64.0m instead of 67108864).
+* -q: Print ? instead of non-printable characters.
 * -R: Recursively list subdirectories encountered.
 * -t: Sort output by modification time (most recent first).
 * -S: Sort output by file size.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
new file mode 100644
index 0000000..03720d3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test FsShell -ls command.
+ */
+public class TestFsShellList {
+  private static Configuration conf;
+  private static FsShell shell;
+  private static LocalFileSystem lfs;
+  private static Path testRootDir;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new Configuration();
+    shell = new FsShell(conf);
+    lfs = FileSystem.getLocal(conf);
+    lfs.setVerifyChecksum(true);
+    lfs.setWriteChecksum(true);
+
+    String root = System.getProperty("test.build.data", "test/build/data");
+    testRootDir = lfs.makeQualified(new Path(root, "testFsShellList"));
+    assertThat(lfs.mkdirs(testRootDir), is(true));
+  }
+
+  @AfterClass
+  public static void teardown() throws Exception {
+    lfs.delete(testRootDir, true);
+  }
+
+  private void createFile(Path filePath) throws Exception {
+    FSDataOutputStream out = lfs.create(filePath);
+    out.writeChars("I am " + filePath);
+    out.close();
+    assertThat(lfs.exists(lfs.getChecksumFile(filePath)), is(true));
+  }
+
+  @Test
+  public void testList() throws Exception {
+    createFile(new Path(testRootDir, "abc"));
+    String[] lsArgv = new String[]{"-ls", testRootDir.toString()};
+    assertThat(shell.run(lsArgv), is(0));
+
+    createFile(new Path(testRootDir, "abc\bd\tef"));
+    createFile(new Path(testRootDir, "ghi"));
+    createFile(new Path(testRootDir, "qq\r123"));
+    lsArgv = new String[]{"-ls", testRootDir.toString()};
+    assertThat(shell.run(lsArgv), is(0));
+
+    lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
+    assertThat(shell.run(lsArgv), is(0));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java
new file mode 100644
index 0000000..8e09fc2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPrintableString.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+import org.junit.Test;
+
+/**
+ * Test {@code PrintableString} class.
+ */
+public class TestPrintableString {
+
+  private void expect(String reason, String raw, String expected) {
+    assertThat(reason, new PrintableString(raw).toString(), is(expected));
+  }
+
+  /**
+   * Test printable characters.
+   */
+  @Test
+  public void testPrintableCharacters() throws Exception {
+    // ASCII
+    expect("Should keep ASCII letter", "abcdef237", "abcdef237");
+    expect("Should keep ASCII symbol", " !\"|}~", " !\"|}~");
+
+    // Unicode BMP
+    expect("Should keep Georgian U+1050 and Box Drawing U+2533",
+        "\u1050\u2533--", "\u1050\u2533--");
+
+    // Unicode SMP
+    expect("Should keep Linear B U+10000 and Phoenician U+10900",
+        "\uD800\uDC00'''\uD802\uDD00", "\uD800\uDC00'''\uD802\uDD00");
+  }
+
+  /**
+   * Test non-printable characters.
+   */
+  @Test
+  public void testNonPrintableCharacters() throws Exception {
+    // Control characters
+    expect("Should replace single control character", "abc\rdef", "abc?def");
+    expect("Should replace multiple control characters",
+        "\babc\tdef", "?abc?def");
+    expect("Should replace all control characters", "\f\f\b\n", "????");
+    expect("Should replace mixed characters starting with a control",
+        "\027ab\0", "?ab?");
+
+    // Formatting Unicode
+    expect("Should replace Byte Order Mark", "-\uFEFF--", "-?--");
+    expect("Should replace Invisible Separator", "\u2063\t", "??");
+
+    // Private use Unicode
+    expect("Should replace private use U+E000", "\uE000", "?");
+    expect("Should replace private use U+E123 and U+F432",
+        "\uE123abc\uF432", "?abc?");
+    expect("Should replace private use in Plane 15 and 16: U+F0000 and " +
+        "U+10FFFD, but keep U+1050",
+        "x\uDB80\uDC00y\uDBFF\uDFFDz\u1050", "x?y?z\u1050");
+
+    // Unassigned Unicode
+    expect("Should replace unassigned U+30000 and U+DFFFF",
+        "-\uD880\uDC00-\uDB3F\uDFFF-", "-?-?-");
+
+    // Standalone surrogate character (not in a pair)
+    expect("Should replace standalone surrogate U+DB80", "x\uDB80yz", "x?yz");
+    expect("Should replace standalone surrogate mixed with valid pair",
+        "x\uDB80\uD802\uDD00yz", "x?\uD802\uDD00yz");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/package-info.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/package-info.java
new file mode 100644
index 0000000..47a4e7a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package for {@code org.apache.hadoop.fs.shell} test classes.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.shell;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0accc330/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 3ad6d66..bbbc1ec 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -54,7 +54,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-ls \[-C\] \[-d\] \[-h\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
+          <expected-output>^-ls \[-C\] \[-d\] \[-h\] \[-q\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -106,6 +106,10 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
+          <expected-output>^\s*-q\s+Print \? instead of non-printable characters\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
           <expected-output>^\s*rather than a number of bytes\.( )*</expected-output>
         </comparator>
         <comparator>




[21/25] hadoop git commit: HADOOP-13243. TestRollingFileSystemSink.testSetInitialFlushTime() fails intermittently. (Daniel Templeton via kasha)

Posted by ae...@apache.org.
HADOOP-13243. TestRollingFileSystemSink.testSetInitialFlushTime() fails intermittently. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49b40646
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49b40646
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49b40646

Branch: refs/heads/HDFS-1312
Commit: 49b4064644b242921af4ddf1de9932bc7bcf5f0e
Parents: 7dae2b3
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Sun Jun 12 14:12:31 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Sun Jun 12 14:12:31 2016 -0700

----------------------------------------------------------------------
 .../hadoop/metrics2/sink/TestRollingFileSystemSink.java      | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49b40646/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java
index 9c34dba..1273052 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java
@@ -118,15 +118,14 @@ public class TestRollingFileSystemSink {
     diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
 
     assertTrue("The initial flush time was calculated incorrectly: " + diff,
-        (diff >= -1000L) && (diff < -900L));
+        (diff == 0L) || ((diff > -1000L) && (diff < -900L)));
 
     calendar.set(Calendar.MILLISECOND, 10);
     rfsSink.setInitialFlushTime(calendar.getTime());
     diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
 
     assertTrue("The initial flush time was calculated incorrectly: " + diff,
-        ((diff >= -10L) && (diff <= 0L) ||
-            ((diff > -1000L) && (diff < -910L))));
+        (diff >= -10L) && (diff <= 0L) || ((diff > -1000L) && (diff < -910L)));
 
     calendar.set(Calendar.SECOND, 1);
     calendar.set(Calendar.MILLISECOND, 10);
@@ -134,8 +133,7 @@ public class TestRollingFileSystemSink {
     diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
 
     assertTrue("The initial flush time was calculated incorrectly: " + diff,
-        ((diff >= -10L) && (diff <= 0L) ||
-            ((diff > -1000L) && (diff < -910L))));
+        (diff >= -10L) && (diff <= 0L) || ((diff > -1000L) && (diff < -910L)));
 
     // Now try pathological settings
     rfsSink = new RollingFileSystemSink(1000, 1000000);

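Expressed as a predicate, the first relaxed assertion now accepts either an exact match or a value just under one second early (illustrative only; this mirrors the ranges in the hunk above rather than the sink's internal rounding logic):

    // Accepts diff == 0, or a diff strictly between -1000 ms and -900 ms.
    static boolean isAcceptableInitialFlushDiff(long diff) {
      return diff == 0L || (diff > -1000L && diff < -900L);
    }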



[07/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
new file mode 100644
index 0000000..f877336
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
@@ -0,0 +1,3323 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu May 12 17:47:43 PDT 2016 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-common 2.7.2"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-
 net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils
 -core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0
 .0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repo
 sitory/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1
 .9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons
 -io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hado
 op-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname hadoop-yarn-common 2.7.2 -->
+<package name="org.apache.hadoop.yarn">
+  <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <method name="getTotalLogFileSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTotalLogFileSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logSize" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <class name="ContainerRollingLogAppender" extends="org.apache.log4j.RollingFileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerRollingLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+  <class name="YarnUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="YarnUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.Thread"/>
+      <param name="e" type="java.lang.Throwable"/>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ In the main entry point.  It is intended to try and cleanly shut down
+ programs using the Yarn Event framework.
+
+ Note: Right now it only will shut down the program if a Error is caught, but
+ not any other exception.  Anything else is just logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+</package>
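The class comment above says the handler is meant to be installed with Thread#setDefaultUncaughtExceptionHandler in a program's main entry point. A minimal sketch of that wiring (illustrative; every name other than YarnUncaughtExceptionHandler is made up):

    import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;

    public final class MyYarnDaemon {
      public static void main(String[] args) {
        // Install before starting any worker threads, so an uncaught Error
        // leads to a clean shutdown of the YARN event framework.
        Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
        // ... start services and event dispatchers here ...
      }
    }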
+<package name="org.apache.hadoop.yarn.api">
+</package>
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.AHSProxy -->
+  <class name="AHSProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ahsAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.AHSProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <class name="ClientRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Client protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the token service name to be used for RMDelegationToken. Depending
+ on whether HA is enabled or not, this method generates the appropriate
+ service name as a comma-separated list of service addresses.
+
+ @param conf Configuration corresponding to the cluster we need the
+             RMDelegationToken for
+ @return - Service name for RMDelegationToken]]>
+      </doc>
+    </method>
+    <method name="getAMRMTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="address" type="java.lang.String"/>
+      <param name="defaultAddr" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.NMProxy -->
+  <class name="NMProxy" extends="org.apache.hadoop.yarn.client.ServerProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
+  <class name="RMHAServiceTarget" extends="org.apache.hadoop.ha.HAServiceTarget"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMHAServiceTarget" type="org.apache.hadoop.yarn.conf.YarnConfiguration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getZKFCAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFencer" return="org.apache.hadoop.ha.NodeFencer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="checkFencingConfigured"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
+    </method>
+    <method name="isAutoFailoverEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
+  <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
+  <class name="RMProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMProxy"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated
+ This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.
+
+ @param conf Configuration to generate retry policy
+ @param protocol Protocol for the proxy
+ @param rmAddress Address of the ResourceManager
+ @param <T> Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ServerProxy -->
+  <class name="ServerProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServerProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxWaitTimeStr" type="java.lang.String"/>
+      <param name="defMaxWaitTime" type="long"/>
+      <param name="connectRetryIntervalStr" type="java.lang.String"/>
+      <param name="defRetryInterval" type="long"/>
+    </method>
+    <method name="createRetriableProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+      <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ServerProxy -->
+</package>
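As a usage illustration for the client proxy classes documented in this package (a sketch under the assumption that ApplicationClientProtocol, which is not part of this excerpt, is the protocol being proxied):

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
    import org.apache.hadoop.yarn.client.ClientRMProxy;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RMProxyExample {
      public static void main(String[] args) throws IOException {
        YarnConfiguration conf = new YarnConfiguration();
        // Builds a proxy to the ResourceManager for the given client protocol,
        // using the RM address (or HA service configuration) from conf.
        ApplicationClientProtocol client =
            ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
        // ... use `client` to submit or query applications ...
      }
    }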
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <class name="TimelineClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineClient" return="org.apache.hadoop.yarn.client.api.TimelineClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a timeline client. The current UGI when the user initialize the
+ client will be used to do the put and the delegation token operations. The
+ current user may use {@link UserGroupInformation#doAs} another user to
+ construct and initialize a timeline client if the following operations are
+ supposed to be conducted by that user.
+
+ @return a timeline client]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+ </p>
+
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+ </p>
+
+ @param domain
+          an {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+ </p>
+
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+         timeline server
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Renew a timeline delegation token.
+ </p>
+
+ @param timelineDT
+          the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Cancel a timeline delegation token.
+ </p>
+
+ @param timelineDT
+          the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that posts application information to the timeline server as a
+ collection of conceptual entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineClient -->
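+  <!-- A minimal usage sketch of the TimelineClient methods listed above, assuming an
+       org.apache.hadoop.conf.Configuration named conf and hypothetical entity type/id values:
+
+       TimelineClient client = TimelineClient.createTimelineClient();
+       client.init(conf);
+       client.start();
+       TimelineEntity entity = new TimelineEntity();
+       entity.setEntityType("HYPOTHETICAL_TYPE");    // placeholder values for illustration
+       entity.setEntityId("entity_0001");
+       TimelinePutResponse response = client.putEntities(entity);    // blocking call
+       Token<?> timelineDT = client.getDelegationToken("renewer@EXAMPLE.COM");
+       client.stop();
+  -->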
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+</package>
+<package name="org.apache.hadoop.yarn.event">
+  <!-- start class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <class name="AbstractEvent" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Event"/>
+    <constructor name="AbstractEvent" type="TYPE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractEvent" type="TYPE, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Parent class of all the events. All events extend this class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <!-- start class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <class name="AsyncDispatcher" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Dispatcher"/>
+    <constructor name="AsyncDispatcher"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.util.concurrent.BlockingQueue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="setDrainEventsOnStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="dispatch"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.event.Event"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isEventThreadWaiting" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isDrained" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="eventDispatchers" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Dispatches {@link Event}s in a separate thread. Currently a single thread
+ does this. Potentially there could be multiple channels for each event type
+ class, and a thread pool could be used to dispatch the events.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AsyncDispatcher -->
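+  <!-- A minimal sketch of wiring a handler into the AsyncDispatcher above, assuming a
+       hypothetical event enum MyEventType and an event class MyEvent extending
+       AbstractEvent<MyEventType>:
+
+       AsyncDispatcher dispatcher = new AsyncDispatcher();
+       dispatcher.init(new Configuration());
+       dispatcher.register(MyEventType.class, new EventHandler<MyEvent>() {
+         @Override
+         public void handle(MyEvent event) {
+           // react to the event on the dispatcher thread
+         }
+       });
+       dispatcher.start();
+       dispatcher.getEventHandler().handle(new MyEvent(MyEventType.SOMETHING));
+       dispatcher.stop();
+  -->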
+  <!-- start interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <interface name="Dispatcher"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <field name="DISPATCHER_EXIT_ON_ERROR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPATCHER_EXIT_ON_ERROR" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Event Dispatcher interface. It dispatches events to registered
+ event handlers based on event types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Event -->
+  <interface name="Event"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface defining the events API.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Event -->
+  <!-- start interface org.apache.hadoop.yarn.event.EventHandler -->
+  <interface name="EventHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="handle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="T"/>
+    </method>
+    <doc>
+    <![CDATA[Interface for handling events of type T
+
+ @param <T> parameterized event of type T]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.EventHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.factories">
+</package>
+<package name="org.apache.hadoop.yarn.factory.providers">
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <class name="AggregatedLogFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <class name="AggregatedLogFormat.LogKey" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="AggregatedLogFormat.LogKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+  <class name="AggregatedLogFormat.LogReader" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat.LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+
+ @return the application owner.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+
+ @return a map of the Application ACLs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="next" return="java.io.DataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+
+ @param key
+ @return the valueStream if there are more keys or null otherwise.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until an {@link EOFException} is thrown, to read the logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until an {@link EOFException} is thrown, to read the logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
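+  <!-- A minimal sketch of draining an aggregated log file with the LogReader above,
+       assuming a Configuration named conf and a Path remoteLogPath pointing at an
+       aggregated log file; it follows the documented contract of calling
+       readAContainerLogsForALogType until an EOFException is thrown:
+
+       AggregatedLogFormat.LogReader reader =
+           new AggregatedLogFormat.LogReader(conf, remoteLogPath);
+       try {
+         AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey();
+         DataInputStream valueStream = reader.next(key);
+         while (valueStream != null) {                 // one key per container
+           try {
+             while (true) {                            // one call per log type
+               AggregatedLogFormat.LogReader
+                   .readAContainerLogsForALogType(valueStream, System.out);
+             }
+           } catch (EOFException eof) {
+             // no more log types for this container
+           }
+           key = new AggregatedLogFormat.LogKey();
+           valueStream = reader.next(key);
+         }
+       } finally {
+         reader.close();
+       }
+  -->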
+  <!-- start class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
+  <class name="LogCLIHelpers" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="LogCLIHelpers"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
+  <class name="CommonNodeLabelsManager" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="handleStoreEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"/>
+    </method>
+    <method name="initDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="initNodeLabelStore"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="startDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="stopDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="addToCluserNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add multiple node labels to repository
+
+ @param labels
+          new node labels added]]>
+      </doc>
+    </method>
+    <method name="checkAddLabelsToNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="addedLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="addLabelsToNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addedLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[add more labels to nodes
+
+ @param addedLabelsToNode node {@literal ->} labels map]]>
+      </doc>
+    </method>
+    <method name="checkRemoveFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="internalRemoveFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+    </method>
+    <method name="removeFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove multiple node labels from repository
+
+ @param labelsToRemove
+          node labels to remove
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="checkRemoveLabelsFromNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="removeLabelsFromNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeNodeFromLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="labels" type="java.util.Set"/>
+    </method>
+    <method name="internalUpdateLabelsOnNodes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeToLabels" type="java.util.Map"/>
+      <param name="op" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.NodeLabelUpdateOperation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeLabelsFromNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="removeLabelsFromNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove labels from nodes; the labels being removed must already be present on
+ these nodes
+
+ @param removeLabelsFromNode node {@literal ->} labels map]]>
+      </doc>
+    </method>
+    <method name="checkReplaceLabelsOnNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="replaceLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="replaceLabelsOnNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="replaceLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Replace the labels on nodes
+
+ @param replaceLabelsToNode node {@literal ->} labels map]]>
+      </doc>
+    </method>
+    <method name="getNodeLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get mapping of nodes to labels
+
+ @return nodes to labels map]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get mapping of labels to nodes for all the labels.
+
+ @return labels to nodes map]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Get mapping of labels to nodes for specified set of labels.
+
+ @param labels set of labels for which labels to nodes mapping will be
+        returned.
+ @return labels to nodes map]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get existing valid labels in repository
+
+ @return existing valid labels in repository]]>
+      </doc>
+    </method>
+    <method name="normalizeLabel" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="label" type="java.lang.String"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+      <param name="checkRunning" type="boolean"/>
+    </method>
+    <method name="getLabelsByNode" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getLabelsByNode" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="createNodeIfNonExisted"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createHostIfNonExisted"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="hostName" type="java.lang.String"/>
+    </method>
+    <method name="normalizeNodeIdToLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeIdToLabels" type="java.util.Map"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="EMPTY_STRING_SET" type="java.util.Set"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ANY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ACCESS_ANY_LABEL_SET" type="java.util.Set"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="WILDCARD_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NODE_LABELS_NOT_ENABLED_ERR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error messages]]>
+      </doc>
+    </field>
+    <field name="NO_LABEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a user doesn't specify a label for a queue or node, it belongs to
+ DEFAULT_LABEL]]>
+      </doc>
+    </field>
+    <field name="dispatcher" type="org.apache.hadoop.yarn.event.Dispatcher"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="labelCollections" type="java.util.concurrent.ConcurrentMap"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeCollections" type="java.util.concurrent.ConcurrentMap"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="readLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="writeLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="store" type="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
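+  <!-- A minimal sketch of the CommonNodeLabelsManager methods above, assuming node labels
+       are enabled in a Configuration named conf and using hypothetical label and host values
+       (method names, including addToCluserNodeLabels, are spelled as listed in this class):
+
+       CommonNodeLabelsManager mgr = new CommonNodeLabelsManager();
+       mgr.init(conf);
+       mgr.start();
+       mgr.addToCluserNodeLabels(new HashSet<String>(Arrays.asList("gpu", "ssd")));
+       NodeId node = NodeId.newInstance("host1.example.com", CommonNodeLabelsManager.WILDCARD_PORT);
+       mgr.addLabelsToNode(Collections.singletonMap(node, Collections.singleton("gpu")));
+       Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
+       mgr.removeLabelsFromNode(Collections.singletonMap(node, Collections.singleton("gpu")));
+       mgr.stop();
+  -->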
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
+  <class name="CommonNodeLabelsManager.Host" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="protected"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager.Host"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="labels" type="java.util.Set"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="nms" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A <code>Host</code> can have multiple <code>Node</code>s]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
+  <class name="CommonNodeLabelsManager.Node" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="protected"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager.Node" type="org.apache.hadoop.yarn.api.records.NodeId"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="labels" type="java.util.Set"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="resource" type="org.apache.hadoop.yarn.api.records.Resource"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="running" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
+  <class name="FileSystemNodeLabelsStore" extends="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileSystemNodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="updateNodeToLabelsMappings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeToLabels" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="storeNewClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recover"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="MIRROR_FILENAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="EDITLOG_FILENAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
+  <class name="FileSystemNodeLabelsStore.SerializedLogType" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="protected"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.NodeLabel -->
+  <class name="NodeLabel" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="NodeLabel" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NodeLabel" type="java.lang.String, org.apache.hadoop.yarn.api.records.Resource, int"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addNodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="removeNodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getAssociatedNodeIds" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeRes" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="removeNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeRes" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNumActiveNMs" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLabelName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCopy" return="org.apache.hadoop.yarn.nodelabels.NodeLabel"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.yarn.nodelabels.NodeLabel"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.NodeLabel -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
+  <class name="NodeLabelsStore" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <constructor name="NodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="updateNodeToLabelsMappings"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeToLabels" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Store node {@literal ->} label]]>
+      </doc>
+    </method>
+    <method name="storeNewClusterNodeLabels"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="label" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Store new labels]]>
+      </doc>
+    </method>
+    <method name="removeClusterNodeLabels"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove labels]]>
+      </doc>
+    </method>
+    <method name="recover"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Recover labels and node to labels mappings from store]]>
+      </doc>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getNodeLabelsManager" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="mgr" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
+  <class name="NodeLabelsStoreEvent" extends="org.apache.hadoop.yarn.event.AbstractEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeLabelsStoreEvent" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
+  <class name="NodeLabelsStoreEventType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
+  <class name="RemoveClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RemoveClusterNodeLabels" type="java.util.Collection"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLabels" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
+  <class name="StoreNewClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StoreNewClusterNodeLabels" type="java.util.Set"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLabels" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
+  <class name="UpdateNodeToLabelsMappingsEvent" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateNodeToLabelsMappingsEvent" type="java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
+</package>
+<package name="org.apache.hadoop.yarn.security">
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <class name="AMRMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[AMRMTokenIdentifier is the TokenIdentifier to be used by
+ ApplicationMasters to authenticate to the ResourceManager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <class name="AMRMTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="AMRMTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <class name="ContainerManagerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerManagerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <class name="ContainerTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getContainerID" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNmHostAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getExpiryTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMasterKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>


<TRUNCATED>



[25/25] hadoop git commit: Merge branch 'trunk' into HDFS-1312

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/982bee0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/982bee0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/982bee0a

Branch: refs/heads/HDFS-1312
Commit: 982bee0a1aa059feb831e5cd84c7bb43e97d497d
Parents: 9d3bb15 709a814
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jun 13 14:17:08 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jun 13 14:17:08 2016 -0700

----------------------------------------------------------------------
 .../classification/tools/RootDocProcessor.java  |     4 +
 .../authentication/client/AuthenticatedURL.java |     4 +-
 .../hadoop-auth/src/site/markdown/Examples.md   |     4 +-
 .../java/org/apache/hadoop/fs/shell/Ls.java     |    28 +-
 .../apache/hadoop/fs/shell/PrintableString.java |    72 +
 .../hadoop/io/retry/RetryInvocationHandler.java |    12 +-
 .../src/site/markdown/FileSystemShell.md        |     3 +-
 .../org/apache/hadoop/fs/TestFsShellList.java   |    78 +
 .../hadoop/fs/shell/TestPrintableString.java    |    87 +
 .../apache/hadoop/fs/shell/package-info.java    |    26 +
 .../apache/hadoop/io/retry/TestRetryProxy.java  |     7 +-
 .../sink/TestRollingFileSystemSink.java         |     8 +-
 .../src/test/resources/testConf.xml             |     6 +-
 .../server/namenode/EncryptionZoneManager.java  |     2 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |     3 +
 .../src/main/webapps/hdfs/explorer.html         |     9 +
 .../src/main/webapps/hdfs/explorer.js           |    83 +-
 .../src/main/webapps/static/hadoop.css          |     7 +
 .../hadoop/hdfs/TestEncryptionZonesWithKMS.java |     7 +-
 .../lib/input/UncompressedSplitLineReader.java  |     8 +-
 .../jdiff/Apache_Hadoop_YARN_API_2.6.0.xml      | 13076 ++++++++++++++++
 .../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml      | 13692 +++++++++++++++++
 .../jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml   |  2427 +++
 .../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml   |  2581 ++++
 .../jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml   |  2870 ++++
 .../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml   |  3323 ++++
 .../Apache_Hadoop_YARN_Server_Common_2.6.0.xml  |  2059 +++
 .../Apache_Hadoop_YARN_Server_Common_2.7.2.xml  |  1801 +++
 .../hadoop-yarn/dev-support/jdiff/Null.java     |    20 +
 .../hadoop-yarn/hadoop-yarn-api/pom.xml         |     2 +
 .../hadoop-yarn/hadoop-yarn-client/pom.xml      |     2 +
 .../hadoop/yarn/client/api/AMRMClient.java      |    43 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |    17 +
 .../yarn/client/api/impl/AMRMClientImpl.java    |   294 +-
 .../client/api/impl/RemoteRequestsTable.java    |   332 +
 .../client/api/impl/BaseAMRMProxyE2ETest.java   |   197 +
 .../yarn/client/api/impl/TestAMRMClient.java    |    26 +-
 .../impl/TestAMRMClientContainerRequest.java    |    54 +-
 .../api/impl/TestAMRMClientOnRMRestart.java     |    16 +-
 .../yarn/client/api/impl/TestAMRMProxy.java     |   171 +-
 .../api/impl/TestDistributedScheduling.java     |   644 +-
 .../yarn/client/api/impl/TestNMClient.java      |     7 +-
 .../src/test/resources/core-site.xml            |    25 -
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |     2 +
 .../records/impl/pb/ResourceRequestPBImpl.java  |     5 +-
 .../hadoop-yarn-server-common/pom.xml           |     2 +
 .../TestContainerManagerRegression.java         |    84 +
 .../resourcemanager/rmnode/RMNodeImpl.java      |    37 +
 .../scheduler/fair/FSAppAttempt.java            |    62 +-
 .../yarn/server/resourcemanager/MockNM.java     |    57 +-
 .../resourcemanager/TestAMAuthorization.java    |    22 +
 .../resourcemanager/TestClientRMTokens.java     |    22 +
 .../resourcemanager/TestRMNodeTransitions.java  |    44 +
 .../TestWorkPreservingRMRestart.java            |     2 +-
 .../scheduler/fair/TestFairScheduler.java       |    72 +
 hadoop-yarn-project/hadoop-yarn/pom.xml         |   129 +
 56 files changed, 43977 insertions(+), 700 deletions(-)
----------------------------------------------------------------------





[03/25] hadoop git commit: YARN-5208. Run TestAMRMClient TestNMClient TestYarnClient TestClientRMTokens TestAMAuthorization tests with hadoop.security.token.service.use_ip enabled. (Rohith Sharma K S via wangda)

Posted by ae...@apache.org.
YARN-5208. Run TestAMRMClient TestNMClient TestYarnClient TestClientRMTokens TestAMAuthorization tests with hadoop.security.token.service.use_ip enabled. (Rohith Sharma K S via wangda)
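
The fix follows one pattern in each affected test class: toggle hadoop.security.token.service.use_ip programmatically around the class (on or off, depending on what that class needs) instead of relying on a test-scoped core-site.xml. A minimal sketch of that pattern follows; the class name is hypothetical, while the Configuration/SecurityUtil calls are the ones the patch adds.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.SecurityUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// Hypothetical suite name; mirrors the @BeforeClass/@AfterClass added below.
public class TestSomeRMProxyFeature {

  @BeforeClass
  public static void enableUseIp() {
    Configuration conf = new Configuration();
    // Proxy-creating ResourceManager tests need IP-based token service names.
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
    SecurityUtil.setConfiguration(conf);
  }

  @AfterClass
  public static void resetUseIp() {
    Configuration conf = new Configuration();
    // Flip the flag back so later test classes see the previous behaviour.
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
    SecurityUtil.setConfiguration(conf);
  }

  // ... @Test methods that create RM proxies run in between ...
}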


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/244506f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/244506f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/244506f9

Branch: refs/heads/HDFS-1312
Commit: 244506f9c8953029283aa8e0eb2006ae2f30025e
Parents: d44f474
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Jun 10 09:34:32 2016 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Jun 10 09:34:32 2016 -0700

----------------------------------------------------------------------
 .../api/impl/TestAMRMClientOnRMRestart.java     | 16 +++++++++++--
 .../src/test/resources/core-site.xml            | 25 --------------------
 .../resourcemanager/TestAMAuthorization.java    | 22 +++++++++++++++++
 .../resourcemanager/TestClientRMTokens.java     | 22 +++++++++++++++++
 4 files changed, 58 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/244506f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index 0890396..719d9a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -27,6 +27,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.SecurityUtil;
@@ -70,18 +71,22 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.util.Records;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestAMRMClientOnRMRestart {
-  static Configuration conf = null;
+  static Configuration conf = new Configuration();
   static final int rolling_interval_sec = 13;
   static final long am_expire_ms = 4000;
 
   @BeforeClass
   public static void setup() throws Exception {
-    conf = new Configuration();
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
+    SecurityUtil.setConfiguration(conf);
+
     conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
     conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
@@ -90,6 +95,13 @@ public class TestAMRMClientOnRMRestart {
     conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
   }
 
+  @AfterClass
+  public static void tearDown() {
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
+    SecurityUtil.setConfiguration(conf);
+  }
+
   // Test does major 6 steps verification.
   // Step-1 : AMRMClient send allocate request for 3 container requests
   // Step-2 : 3 containers are allocated by RM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/244506f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/core-site.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/core-site.xml
deleted file mode 100644
index f0d3085..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/core-site.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>hadoop.security.token.service.use_ip</name>
-    <value>false</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/244506f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
index c51cd87..0f88c79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java
@@ -30,6 +30,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputByteBuffer;
 import org.apache.hadoop.security.AccessControlException;
@@ -63,7 +64,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -77,6 +80,25 @@ public class TestAMAuthorization {
   private final Configuration conf;
   private MockRM rm;
 
+  // Note : Any test case in ResourceManager package that creates a proxy has
+  // to be run with enabling hadoop.security.token.service.use_ip. And reset
+  // to false at the end of test class. See YARN-5208
+  @BeforeClass
+  public static void setUp() {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
+    SecurityUtil.setConfiguration(conf);
+  }
+
+  @AfterClass
+  public static void resetConf() {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
+    SecurityUtil.setConfiguration(conf);
+  }
+
   @Parameters
   public static Collection<Object[]> configs() {
     Configuration conf = new Configuration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/244506f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
index c21db4e..65145a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
@@ -37,11 +37,13 @@ import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.net.NetUtils;
+import org.junit.AfterClass;
 import org.junit.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Text;
@@ -72,6 +74,7 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 
@@ -79,6 +82,25 @@ public class TestClientRMTokens {
 
   private static final Log LOG = LogFactory.getLog(TestClientRMTokens.class);
   
+  // Note : Any test case in ResourceManager package that creates a proxy has
+  // to be run with enabling hadoop.security.token.service.use_ip. And reset
+  // to false at the end of test class. See YARN-5208
+  @BeforeClass
+  public static void setUp() {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
+    SecurityUtil.setConfiguration(conf);
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
+    SecurityUtil.setConfiguration(conf);
+  }
+
   @Before
   public void resetSecretManager() {
     RMDelegationTokenIdentifier.Renewer.setSecretManager(null, null);




[20/25] hadoop git commit: YARN-5212. Run existing ContainerManager tests using QueuingContainerManagerImpl. (Konstantinos Karanasos via asuresh)

Posted by ae...@apache.org.
YARN-5212. Run existing ContainerManager tests using QueuingContainerManagerImpl. (Konstantinos Karanasos via asuresh)
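
The mechanism is to subclass the existing suite and swap the implementation in through its factory hook, so every inherited test case re-runs against the queuing container manager. A condensed sketch of that shape follows; the full class below also overrides a few security and authorization hooks that this sketch omits for brevity.

package org.apache.hadoop.yarn.server.nodemanager.containermanager;

import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.queuing.QueuingContainerManagerImpl;

// Condensed sketch only; see the committed TestContainerManagerRegression below.
public class TestContainerManagerRegressionSketch extends TestContainerManager {

  public TestContainerManagerRegressionSketch()
      throws UnsupportedFileSystemException {
    super();
  }

  @Override
  protected ContainerManagerImpl createContainerManager(
      DeletionService delSrvc) {
    // Every @Test inherited from TestContainerManager now exercises the
    // queuing implementation instead of the default ContainerManagerImpl.
    return new QueuingContainerManagerImpl(context, exec, delSrvc,
        nodeStatusUpdater, metrics, dirsHandler);
  }
}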


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dae2b3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dae2b3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dae2b3b

Branch: refs/heads/HDFS-1312
Commit: 7dae2b3bc4cebbb186b3edd14e31074be02af329
Parents: 5143277
Author: Arun Suresh <as...@apache.org>
Authored: Sun Jun 12 10:05:37 2016 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Sun Jun 12 10:05:37 2016 -0700

----------------------------------------------------------------------
 .../TestContainerManagerRegression.java         | 84 ++++++++++++++++++++
 1 file changed, 84 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dae2b3b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRegression.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRegression.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRegression.java
new file mode 100644
index 0000000..71af76f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRegression.java
@@ -0,0 +1,84 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.NMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.queuing.QueuingContainerManagerImpl;
+
+/**
+ * Test class that invokes all test cases of {@link TestContainerManager} while
+ * using the {@link QueuingContainerManagerImpl}. The goal is to assert that
+ * no regression is introduced in the existing cases when no queuing of tasks at
+ * the NMs is involved.
+ */
+public class TestContainerManagerRegression extends TestContainerManager {
+
+  public TestContainerManagerRegression()
+      throws UnsupportedFileSystemException {
+    super();
+  }
+
+  static {
+    LOG = LogFactory.getLog(TestContainerManagerRegression.class);
+  }
+
+  @Override
+  protected ContainerManagerImpl createContainerManager(
+      DeletionService delSrvc) {
+    return new QueuingContainerManagerImpl(context, exec, delSrvc,
+        nodeStatusUpdater, metrics, dirsHandler) {
+      @Override
+      public void
+          setBlockNewContainerRequests(boolean blockNewContainerRequests) {
+        // do nothing
+      }
+
+      @Override
+      protected UserGroupInformation getRemoteUgi() throws YarnException {
+        ApplicationId appId = ApplicationId.newInstance(0, 0);
+        ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
+            appId, 1);
+        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(
+            appAttemptId.toString());
+        ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId, context
+            .getNodeId(), user, context.getNMTokenSecretManager()
+                .getCurrentKey().getKeyId()));
+        return ugi;
+      }
+
+      @Override
+      protected void authorizeGetAndStopContainerRequest(
+          ContainerId containerId, Container container, boolean stopRequest,
+          NMTokenIdentifier identifier) throws YarnException {
+        if (container == null || container.getUser().equals("Fail")) {
+          throw new YarnException("Reject this container");
+        }
+      }
+    };
+  }
+}




[18/25] hadoop git commit: YARN-5124. Modify AMRMClient to set the ExecutionType in the ResourceRequest. (asuresh)

Posted by ae...@apache.org.
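
YARN-5124 teaches AMRMClient to carry an ExecutionType on each ResourceRequest; the diffs below adapt the client tests accordingly. The essential client-side call, pulled out of those tests into a standalone sketch (the surrounding AM registration and allocate loop are assumed), looks like this:

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;

public class OpportunisticAskSketch {

  // amClient is assumed to be an already started and registered AMRMClient.
  static void askOpportunistic(
      AMRMClient<AMRMClient.ContainerRequest> amClient) {
    Priority priority = Priority.newInstance(2);
    Resource capability = Resource.newInstance(1024, 1);

    // Ask for one container anywhere in the cluster; the trailing
    // ExecutionTypeRequest is what now gets propagated into the outgoing
    // ResourceRequest (true = strictly enforce the requested type).
    amClient.addContainerRequest(
        new AMRMClient.ContainerRequest(capability, null, null, priority,
            true, null,
            ExecutionTypeRequest.newInstance(
                ExecutionType.OPPORTUNISTIC, true)));
  }
}

Omitting the final ExecutionTypeRequest argument leaves the request at the GUARANTEED default, which is how the mixed-type test below exercises both paths.
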
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
index 6d93eb3..a556aa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -22,20 +22,31 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
@@ -43,12 +54,23 @@ import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.Ignore;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
  * Validates End2End Distributed Scheduling flow which includes the AM
@@ -57,11 +79,70 @@ import java.util.List;
  * the NM and the DistributedSchedulingProtocol used by the framework to talk
  * to the DistributedSchedulingService running on the RM.
  */
-public class TestDistributedScheduling extends TestAMRMProxy {
+public class TestDistributedScheduling extends BaseAMRMProxyE2ETest {
 
   private static final Log LOG =
       LogFactory.getLog(TestDistributedScheduling.class);
 
+  protected MiniYARNCluster cluster;
+  protected YarnClient rmClient;
+  protected ApplicationMasterProtocol client;
+  protected Configuration conf;
+  protected Configuration yarnConf;
+  protected ApplicationAttemptId attemptId;
+  protected ApplicationId appId;
+
+  @Before
+  public void doBefore() throws Exception {
+    cluster = new MiniYARNCluster("testDistributedSchedulingE2E", 1, 1, 1);
+
+    conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.NM_CONTAINER_QUEUING_ENABLED, true);
+    cluster.init(conf);
+    cluster.start();
+    yarnConf = cluster.getConfig();
+
+    // the client has to connect to AMRMProxy
+    yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
+        YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
+    rmClient = YarnClient.createYarnClient();
+    rmClient.init(yarnConf);
+    rmClient.start();
+
+    // Submit application
+    attemptId = createApp(rmClient, cluster, conf);
+    appId = attemptId.getApplicationId();
+    client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
+  }
+
+  @After
+  public void doAfter() throws Exception {
+    if (client != null) {
+      try {
+        client.finishApplicationMaster(FinishApplicationMasterRequest
+            .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null));
+        rmClient.killApplication(attemptId.getApplicationId());
+        attemptId = null;
+      } catch (Exception e) {
+      }
+    }
+    if (rmClient != null) {
+      try {
+        rmClient.stop();
+      } catch (Exception e) {
+      }
+    }
+    if (cluster != null) {
+      try {
+        cluster.stop();
+      } catch (Exception e) {
+      }
+    }
+  }
+
+
   /**
    * Validates if Allocate Requests containing only OPPORTUNISTIC container
    * requests are satisfied instantly.
@@ -70,104 +151,63 @@ public class TestDistributedScheduling extends TestAMRMProxy {
    */
   @Test(timeout = 60000)
   public void testOpportunisticExecutionTypeRequestE2E() throws Exception {
-    MiniYARNCluster cluster =
-        new MiniYARNCluster("testDistributedSchedulingE2E", 1, 1, 1);
-    YarnClient rmClient = null;
-    ApplicationMasterProtocol client;
-
-    try {
-      Configuration conf = new YarnConfiguration();
-      conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-      conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true);
-      conf.setBoolean(YarnConfiguration.NM_CONTAINER_QUEUING_ENABLED, true);
-      cluster.init(conf);
-      cluster.start();
-      final Configuration yarnConf = cluster.getConfig();
-
-      // the client has to connect to AMRMProxy
-
-      yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
-          YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
-      rmClient = YarnClient.createYarnClient();
-      rmClient.init(yarnConf);
-      rmClient.start();
-
-      // Submit application
-
-      ApplicationId appId = createApp(rmClient, cluster);
-
-      client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
-
-      LOG.info("testDistributedSchedulingE2E - Register");
-
-      RegisterApplicationMasterResponse responseRegister =
-          client.registerApplicationMaster(RegisterApplicationMasterRequest
-              .newInstance(NetUtils.getHostname(), 1024, ""));
-
-      Assert.assertNotNull(responseRegister);
-      Assert.assertNotNull(responseRegister.getQueue());
-      Assert.assertNotNull(responseRegister.getApplicationACLs());
-      Assert.assertNotNull(responseRegister.getClientToAMTokenMasterKey());
-      Assert
-          .assertNotNull(responseRegister.getContainersFromPreviousAttempts());
-      Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
-      Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
-
-      RMApp rmApp =
-          cluster.getResourceManager().getRMContext().getRMApps().get(appId);
-      Assert.assertEquals(RMAppState.RUNNING, rmApp.getState());
-
-      LOG.info("testDistributedSchedulingE2E - Allocate");
-
-      AllocateRequest request =
-          createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
-
-      // Replace 'ANY' requests with OPPORTUNISTIC aks and remove
-      // everything else
-      List<ResourceRequest> newAskList = new ArrayList<>();
-      for (ResourceRequest rr : request.getAskList()) {
-        if (ResourceRequest.ANY.equals(rr.getResourceName())) {
-          ResourceRequest newRR = ResourceRequest.newInstance(rr
-                  .getPriority(), rr.getResourceName(),
-              rr.getCapability(), rr.getNumContainers(), rr.getRelaxLocality(),
-              rr.getNodeLabelExpression(),
-              ExecutionTypeRequest.newInstance(
-                  ExecutionType.OPPORTUNISTIC, true));
-          newAskList.add(newRR);
-        }
-      }
-      request.setAskList(newAskList);
-
-      AllocateResponse allocResponse = client.allocate(request);
-      Assert.assertNotNull(allocResponse);
-
-      // Ensure that all the requests are satisfied immediately
-      Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
-
-      // Verify that the allocated containers are OPPORTUNISTIC
-      for (Container allocatedContainer : allocResponse
-          .getAllocatedContainers()) {
-        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
-            .newContainerTokenIdentifier(
-                allocatedContainer.getContainerToken());
-        Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
-            containerTokenIdentifier.getExecutionType());
-      }
-
-      LOG.info("testDistributedSchedulingE2E - Finish");
-
-      FinishApplicationMasterResponse responseFinish =
-          client.finishApplicationMaster(FinishApplicationMasterRequest
-              .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null));
-
-      Assert.assertNotNull(responseFinish);
-
-    } finally {
-      if (rmClient != null) {
-        rmClient.stop();
+    LOG.info("testDistributedSchedulingE2E - Register");
+
+    RegisterApplicationMasterResponse responseRegister =
+        client.registerApplicationMaster(RegisterApplicationMasterRequest
+            .newInstance(NetUtils.getHostname(), 1024, ""));
+
+    Assert.assertNotNull(responseRegister);
+    Assert.assertNotNull(responseRegister.getQueue());
+    Assert.assertNotNull(responseRegister.getApplicationACLs());
+    Assert.assertNotNull(responseRegister.getClientToAMTokenMasterKey());
+    Assert
+        .assertNotNull(responseRegister.getContainersFromPreviousAttempts());
+    Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
+    Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
+
+    RMApp rmApp =
+        cluster.getResourceManager().getRMContext().getRMApps().get(appId);
+    Assert.assertEquals(RMAppState.RUNNING, rmApp.getState());
+
+    LOG.info("testDistributedSchedulingE2E - Allocate");
+
+    AllocateRequest request =
+        createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
+
+    // Replace 'ANY' requests with OPPORTUNISTIC aks and remove
+    // everything else
+    List<ResourceRequest> newAskList = new ArrayList<>();
+    for (ResourceRequest rr : request.getAskList()) {
+      if (ResourceRequest.ANY.equals(rr.getResourceName())) {
+        ResourceRequest newRR = ResourceRequest.newInstance(rr
+                .getPriority(), rr.getResourceName(),
+            rr.getCapability(), rr.getNumContainers(), rr.getRelaxLocality(),
+            rr.getNodeLabelExpression(),
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true));
+        newAskList.add(newRR);
       }
-      cluster.stop();
     }
+    request.setAskList(newAskList);
+
+    AllocateResponse allocResponse = client.allocate(request);
+    Assert.assertNotNull(allocResponse);
+
+    // Ensure that all the requests are satisfied immediately
+    Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
+
+    // Verify that the allocated containers are OPPORTUNISTIC
+    for (Container allocatedContainer : allocResponse
+        .getAllocatedContainers()) {
+      ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
+          .newContainerTokenIdentifier(
+              allocatedContainer.getContainerToken());
+      Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
+          containerTokenIdentifier.getExecutionType());
+    }
+
+    LOG.info("testDistributedSchedulingE2E - Finish");
   }
 
   /**
@@ -178,135 +218,305 @@ public class TestDistributedScheduling extends TestAMRMProxy {
    */
   @Test(timeout = 60000)
   public void testMixedExecutionTypeRequestE2E() throws Exception {
-    MiniYARNCluster cluster =
-        new MiniYARNCluster("testDistributedSchedulingE2E", 1, 1, 1);
-    YarnClient rmClient = null;
-    ApplicationMasterProtocol client;
+    LOG.info("testDistributedSchedulingE2E - Register");
+
+    RegisterApplicationMasterResponse responseRegister =
+        client.registerApplicationMaster(RegisterApplicationMasterRequest
+            .newInstance(NetUtils.getHostname(), 1024, ""));
+
+    Assert.assertNotNull(responseRegister);
+    Assert.assertNotNull(responseRegister.getQueue());
+    Assert.assertNotNull(responseRegister.getApplicationACLs());
+    Assert.assertNotNull(responseRegister.getClientToAMTokenMasterKey());
+    Assert
+        .assertNotNull(responseRegister.getContainersFromPreviousAttempts());
+    Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
+    Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
+
+    RMApp rmApp =
+        cluster.getResourceManager().getRMContext().getRMApps().get(appId);
+    Assert.assertEquals(RMAppState.RUNNING, rmApp.getState());
+
+    LOG.info("testDistributedSchedulingE2E - Allocate");
+
+    AllocateRequest request =
+        createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
+    List<ResourceRequest> askList = request.getAskList();
+    List<ResourceRequest> newAskList = new ArrayList<>(askList);
+
+    // Duplicate all ANY requests marking them as opportunistic
+    for (ResourceRequest rr : askList) {
+      if (ResourceRequest.ANY.equals(rr.getResourceName())) {
+        ResourceRequest newRR = ResourceRequest.newInstance(rr
+                .getPriority(), rr.getResourceName(),
+            rr.getCapability(), rr.getNumContainers(), rr.getRelaxLocality(),
+            rr.getNodeLabelExpression(),
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true));
+        newAskList.add(newRR);
+      }
+    }
+    request.setAskList(newAskList);
+
+    AllocateResponse allocResponse = client.allocate(request);
+    Assert.assertNotNull(allocResponse);
+
+    // Ensure that all the requests are satisfied immediately
+    Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
+
+    // Verify that the allocated containers are OPPORTUNISTIC
+    for (Container allocatedContainer : allocResponse
+        .getAllocatedContainers()) {
+      ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
+          .newContainerTokenIdentifier(
+              allocatedContainer.getContainerToken());
+      Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
+          containerTokenIdentifier.getExecutionType());
+    }
+
+    request.setAskList(new ArrayList<ResourceRequest>());
+    request.setResponseId(request.getResponseId() + 1);
 
+    Thread.sleep(1000);
+
+    // RM should allocate GUARANTEED containers within 2 calls to allocate()
+    allocResponse = client.allocate(request);
+    Assert.assertNotNull(allocResponse);
+    Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
+
+    // Verify that the allocated containers are GUARANTEED
+    for (Container allocatedContainer : allocResponse
+        .getAllocatedContainers()) {
+      ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
+          .newContainerTokenIdentifier(
+              allocatedContainer.getContainerToken());
+      Assert.assertEquals(ExecutionType.GUARANTEED,
+          containerTokenIdentifier.getExecutionType());
+    }
+
+    LOG.info("testDistributedSchedulingE2E - Finish");
+  }
+
+  /**
+   * Validates if AMRMClient can be used with Distributed Scheduling turned on.
+   *
+   * @throws Exception
+   */
+  @Test(timeout = 120000)
+  @SuppressWarnings("unchecked")
+  public void testAMRMClient() throws Exception {
+    AMRMClientImpl<AMRMClient.ContainerRequest> amClient = null;
     try {
-      Configuration conf = new YarnConfiguration();
-      conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-      conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true);
-      conf.setBoolean(YarnConfiguration.NM_CONTAINER_QUEUING_ENABLED, true);
-      cluster.init(conf);
-      cluster.start();
-      final Configuration yarnConf = cluster.getConfig();
-
-      // the client has to connect to AMRMProxy
-
-      yarnConf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,
-          YarnConfiguration.DEFAULT_AMRM_PROXY_ADDRESS);
-      rmClient = YarnClient.createYarnClient();
-      rmClient.init(yarnConf);
-      rmClient.start();
-
-      // Submit application
-
-      ApplicationId appId = createApp(rmClient, cluster);
-
-      client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
-
-      LOG.info("testDistributedSchedulingE2E - Register");
-
-      RegisterApplicationMasterResponse responseRegister =
-          client.registerApplicationMaster(RegisterApplicationMasterRequest
-              .newInstance(NetUtils.getHostname(), 1024, ""));
-
-      Assert.assertNotNull(responseRegister);
-      Assert.assertNotNull(responseRegister.getQueue());
-      Assert.assertNotNull(responseRegister.getApplicationACLs());
-      Assert.assertNotNull(responseRegister.getClientToAMTokenMasterKey());
-      Assert
-          .assertNotNull(responseRegister.getContainersFromPreviousAttempts());
-      Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
-      Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
-
-      RMApp rmApp =
-          cluster.getResourceManager().getRMContext().getRMApps().get(appId);
-      Assert.assertEquals(RMAppState.RUNNING, rmApp.getState());
-
-      LOG.info("testDistributedSchedulingE2E - Allocate");
-
-      AllocateRequest request =
-          createAllocateRequest(rmClient.getNodeReports(NodeState.RUNNING));
-      List<ResourceRequest> askList = request.getAskList();
-      List<ResourceRequest> newAskList = new ArrayList<>(askList);
-
-      // Duplicate all ANY requests marking them as opportunistic
-      for (ResourceRequest rr : askList) {
-        if (ResourceRequest.ANY.equals(rr.getResourceName())) {
-          ResourceRequest newRR = ResourceRequest.newInstance(rr
-              .getPriority(), rr.getResourceName(),
-              rr.getCapability(), rr.getNumContainers(), rr.getRelaxLocality(),
-              rr.getNodeLabelExpression(),
+      Priority priority = Priority.newInstance(1);
+      Priority priority2 = Priority.newInstance(2);
+      Resource capability = Resource.newInstance(1024, 1);
+
+      List<NodeReport> nodeReports = rmClient.getNodeReports(NodeState.RUNNING);
+      String node = nodeReports.get(0).getNodeId().getHost();
+      String rack = nodeReports.get(0).getRackName();
+      String[] nodes = new String[]{node};
+      String[] racks = new String[]{rack};
+
+      // start am rm client
+      amClient = new AMRMClientImpl(client);
+      amClient.init(yarnConf);
+      amClient.start();
+      amClient.registerApplicationMaster(NetUtils.getHostname(), 1024, "");
+
+      assertEquals(0, amClient.ask.size());
+      assertEquals(0, amClient.release.size());
+
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, null, null, priority2,
+              true, null,
+              ExecutionTypeRequest.newInstance(
+                  ExecutionType.OPPORTUNISTIC, true)));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, null, null, priority2,
+              true, null,
+              ExecutionTypeRequest.newInstance(
+                  ExecutionType.OPPORTUNISTIC, true)));
+
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, null, null, priority2,
+              true, null,
               ExecutionTypeRequest.newInstance(
-                  ExecutionType.OPPORTUNISTIC, true));
-          newAskList.add(newRR);
+                  ExecutionType.OPPORTUNISTIC, true)));
+
+      int containersRequestedNode = amClient.remoteRequestsTable.get(priority,
+          node, ExecutionType.GUARANTEED, capability).remoteRequest
+          .getNumContainers();
+      int containersRequestedRack = amClient.remoteRequestsTable.get(priority,
+          rack, ExecutionType.GUARANTEED, capability).remoteRequest
+          .getNumContainers();
+      int containersRequestedAny = amClient.remoteRequestsTable.get(priority,
+          ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+          .remoteRequest.getNumContainers();
+      int oppContainersRequestedAny =
+          amClient.remoteRequestsTable.get(priority2, ResourceRequest.ANY,
+              ExecutionType.OPPORTUNISTIC, capability).remoteRequest
+              .getNumContainers();
+
+      assertEquals(2, containersRequestedNode);
+      assertEquals(2, containersRequestedRack);
+      assertEquals(2, containersRequestedAny);
+      assertEquals(1, oppContainersRequestedAny);
+
+      assertEquals(4, amClient.ask.size());
+      assertEquals(0, amClient.release.size());
+
+      // RM should allocate container within 2 calls to allocate()
+      int allocatedContainerCount = 0;
+      int iterationsLeft = 10;
+      Set<ContainerId> releases = new TreeSet<>();
+
+      amClient.getNMTokenCache().clearCache();
+      Assert.assertEquals(0,
+          amClient.getNMTokenCache().numberOfTokensInCache());
+      HashMap<String, Token> receivedNMTokens = new HashMap<>();
+
+      while (allocatedContainerCount <
+          (containersRequestedAny + oppContainersRequestedAny)
+          && iterationsLeft-- > 0) {
+        AllocateResponse allocResponse = amClient.allocate(0.1f);
+        assertEquals(0, amClient.ask.size());
+        assertEquals(0, amClient.release.size());
+
+        allocatedContainerCount += allocResponse.getAllocatedContainers()
+            .size();
+        for (Container container : allocResponse.getAllocatedContainers()) {
+          ContainerId rejectContainerId = container.getId();
+          releases.add(rejectContainerId);
         }
-      }
-      request.setAskList(newAskList);
-
-      AllocateResponse allocResponse = client.allocate(request);
-      Assert.assertNotNull(allocResponse);
-
-      // Ensure that all the requests are satisfied immediately
-      Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
-
-      // Verify that the allocated containers are OPPORTUNISTIC
-      for (Container allocatedContainer : allocResponse
-          .getAllocatedContainers()) {
-        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
-            .newContainerTokenIdentifier(
-            allocatedContainer.getContainerToken());
-        Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
-            containerTokenIdentifier.getExecutionType());
-      }
-
-      request.setAskList(new ArrayList<ResourceRequest>());
-      request.setResponseId(request.getResponseId() + 1);
-
-      Thread.sleep(1000);
 
-      // RM should allocate GUARANTEED containers within 2 calls to allocate()
-      allocResponse = client.allocate(request);
-      Assert.assertNotNull(allocResponse);
-      Assert.assertEquals(2, allocResponse.getAllocatedContainers().size());
+        for (NMToken token : allocResponse.getNMTokens()) {
+          String nodeID = token.getNodeId().toString();
+          receivedNMTokens.put(nodeID, token.getToken());
+        }
 
-      // Verify that the allocated containers are GUARANTEED
-      for (Container allocatedContainer : allocResponse
-          .getAllocatedContainers()) {
-        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
-            .newContainerTokenIdentifier(
-                allocatedContainer.getContainerToken());
-        Assert.assertEquals(ExecutionType.GUARANTEED,
-            containerTokenIdentifier.getExecutionType());
+        if (allocatedContainerCount < containersRequestedAny) {
+          // sleep to let NM's heartbeat to RM and trigger allocations
+          sleep(100);
+        }
       }
 
-      LOG.info("testDistributedSchedulingE2E - Finish");
+      assertEquals(allocatedContainerCount,
+          containersRequestedAny + oppContainersRequestedAny);
+      for (ContainerId rejectContainerId : releases) {
+        amClient.releaseAssignedContainer(rejectContainerId);
+      }
+      assertEquals(3, amClient.release.size());
+      assertEquals(0, amClient.ask.size());
+
+      // need to tell the AMRMClient that we dont need these resources anymore
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+      amClient.removeContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority2,
+              true, null,
+              ExecutionTypeRequest.newInstance(
+                  ExecutionType.OPPORTUNISTIC, true)));
+      assertEquals(4, amClient.ask.size());
+
+      // test RPC exception handling
+      amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability,
+          nodes, racks, priority));
+      amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability,
+          nodes, racks, priority));
+      amClient.addContainerRequest(
+          new AMRMClient.ContainerRequest(capability, nodes, racks, priority2,
+              true, null,
+              ExecutionTypeRequest.newInstance(
+                  ExecutionType.OPPORTUNISTIC, true)));
+
+      final AMRMClient amc = amClient;
+      ApplicationMasterProtocol realRM = amClient.rmClient;
+      try {
+        ApplicationMasterProtocol mockRM = mock(ApplicationMasterProtocol
+            .class);
+        when(mockRM.allocate(any(AllocateRequest.class))).thenAnswer(
+            new Answer<AllocateResponse>() {
+              public AllocateResponse answer(InvocationOnMock invocation)
+                  throws Exception {
+                amc.removeContainerRequest(
+                    new AMRMClient.ContainerRequest(capability, nodes,
+                        racks, priority));
+                amc.removeContainerRequest(
+                    new AMRMClient.ContainerRequest(capability, nodes, racks,
+                        priority));
+                amc.removeContainerRequest(
+                    new AMRMClient.ContainerRequest(capability, null, null,
+                        priority2, true, null,
+                        ExecutionTypeRequest.newInstance(
+                            ExecutionType.OPPORTUNISTIC, true)));
+                throw new Exception();
+              }
+            });
+        amClient.rmClient = mockRM;
+        amClient.allocate(0.1f);
+      } catch (Exception ioe) { // expected: the mocked RM throws on allocate
+      } finally {
+        amClient.rmClient = realRM;
+      }
 
-      FinishApplicationMasterResponse responseFinish =
-          client.finishApplicationMaster(FinishApplicationMasterRequest
-              .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null));
+      assertEquals(3, amClient.release.size());
+      assertEquals(6, amClient.ask.size());
+
+      iterationsLeft = 3;
+      // do a few iterations to ensure the RM is not going to send new containers
+      while (iterationsLeft-- > 0) {
+        // inform RM of rejection
+        AllocateResponse allocResponse = amClient.allocate(0.1f);
+        // RM did not send new containers because AM does not need any
+        assertEquals(0, allocResponse.getAllocatedContainers().size());
+        if (allocResponse.getCompletedContainersStatuses().size() > 0) {
+          for (ContainerStatus cStatus : allocResponse
+              .getCompletedContainersStatuses()) {
+            if (releases.contains(cStatus.getContainerId())) {
+              assertEquals(cStatus.getState(), ContainerState.COMPLETE);
+              assertEquals(-100, cStatus.getExitStatus());
+              releases.remove(cStatus.getContainerId());
+            }
+          }
+        }
+        if (iterationsLeft > 0) {
+          // sleep to make sure the NM heartbeats to the RM
+          sleep(100);
+        }
+      }
+      assertEquals(0, amClient.ask.size());
+      assertEquals(0, amClient.release.size());
 
-      Assert.assertNotNull(responseFinish);
+      amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
+          null, null);
 
     } finally {
-      if (rmClient != null) {
-        rmClient.stop();
+      if (amClient != null && amClient.getServiceState() == Service.STATE
+          .STARTED) {
+        amClient.stop();
       }
-      cluster.stop();
     }
   }
 
-  @Ignore
-  @Override
-  public void testAMRMProxyE2E() throws Exception { }
-
-  @Ignore
-  @Override
-  public void testE2ETokenRenewal() throws Exception { }
-
-  @Ignore
-  @Override
-  public void testE2ETokenSwap() throws Exception { }
+  private void sleep(int sleepTime) {
+    try {
+      Thread.sleep(sleepTime);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+  }
 }
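
The test code above exercises AMRMClient requests that carry an ExecutionTypeRequest, i.e. asks for OPPORTUNISTIC as well as GUARANTEED containers. A rough sketch of how an application master issues such a request with this API (assuming an already started AMRMClient<ContainerRequest> named amClient, the usual org.apache.hadoop.yarn.api.records imports, and a calling method that may throw YarnException and IOException; names and values here are illustrative, not part of the patch):

    Priority priority = Priority.newInstance(2);
    Resource capability = Resource.newInstance(1024, 1);

    // Ask for one OPPORTUNISTIC container anywhere in the cluster
    // (no node/rack constraint, locality relaxed, no node label).
    AMRMClient.ContainerRequest oppRequest =
        new AMRMClient.ContainerRequest(capability, null, null, priority,
            true, null,
            ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));
    amClient.addContainerRequest(oppRequest);

    // allocate() is called periodically in a real AM; each call heartbeats the RM.
    AllocateResponse response = amClient.allocate(0.1f);
    for (Container allocated : response.getAllocatedContainers()) {
      // hand 'allocated' to an NMClient for launch, then retire the satisfied ask
      amClient.removeContainerRequest(oppRequest);
    }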

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
index cd04130..969fb70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
@@ -251,9 +252,9 @@ public class TestNMClient {
           racks, priority));
     }
 
-    int containersRequestedAny = rmClient.remoteRequestsTable.get(priority)
-        .get(ResourceRequest.ANY).get(capability).remoteRequest
-        .getNumContainers();
+    int containersRequestedAny = rmClient.remoteRequestsTable.get(priority,
+        ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+        .remoteRequest.getNumContainers();
 
     // RM should allocate container within 2 calls to allocate()
     int allocatedContainerCount = 0;
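
The one-line change above reflects the reshaped request bookkeeping in the AMRM client: the remote request table is no longer a chain of nested maps keyed by priority, resource name and capability, but a single table whose composite key also carries the execution type. Side by side, the lookup changes roughly as follows (a sketch against the test-visible remoteRequestsTable field, not a public API):

    // before: nested maps, execution type implicit (always GUARANTEED)
    int before = rmClient.remoteRequestsTable.get(priority)
        .get(ResourceRequest.ANY).get(capability)
        .remoteRequest.getNumContainers();

    // after: one composite lookup, with the execution type as an explicit key
    int after = rmClient.remoteRequestsTable.get(priority,
        ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
        .remoteRequest.getNumContainers();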

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index fd56f4f..b0c4b97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -214,7 +214,8 @@ public class ResourceRequestPBImpl extends  ResourceRequest {
         + ", # Containers: " + getNumContainers()
         + ", Location: " + getResourceName()
         + ", Relax Locality: " + getRelaxLocality()
-        + ", Execution Spec: " + getExecutionTypeRequest() + "}";
+        + ", Execution Type Request: " + getExecutionTypeRequest()
+        + ", Node Label Expression: " + getNodeLabelExpression() + "}";
   }
 
   @Override
@@ -235,4 +236,4 @@ public class ResourceRequestPBImpl extends  ResourceRequest {
     }
     builder.setNodeLabelExpression(nodeLabelExpression);
   }
-}
\ No newline at end of file
+}
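
With the toString() change above, a ResourceRequest now also reports its node label expression alongside the execution type request. A small illustration (ResourceRequest.newInstance and the sample values are illustrative; only the two trailing fields come from this hunk):

    ResourceRequest req = ResourceRequest.newInstance(
        Priority.newInstance(1), ResourceRequest.ANY,
        Resource.newInstance(1024, 1), 1);
    req.setNodeLabelExpression("gpu");
    // prints something like:
    //   {Priority: ..., Execution Type Request: ..., Node Label Expression: gpu}
    System.out.println(req);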




[12/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
new file mode 100644
index 0000000..5d58600
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
@@ -0,0 +1,13076 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Wed Apr 08 11:29:43 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-api 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -verbose -classpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/classes:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-common/2.6.0/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/l
 lu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2
 .3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/uti
 ls/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-log4j12/1.7.5/slf4j-log4j12-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snap
 py-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-auth/2.6.0/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/or
 g/htrace/htrace-core/3.0.4/htrace-core-3.0.4.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/llu/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/llu/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-annotations/2.6.0/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar -sourcepath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java -apidir /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/site/jdiff/xml -apiname hadoop-yarn-api 2.6.0 -->
+<package name="org.apache.hadoop.yarn.api">
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <interface name="ApplicationClientProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNewApplication" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to obtain a new {@link ApplicationId} for
+ submitting new applications.</p>
+
+ <p>The <code>ResourceManager</code> responds with a new, monotonically
+ increasing, {@link ApplicationId} which is used by the client to submit
+ a new application.</p>
+
+ <p>The <code>ResourceManager</code> also responds with details such
+ as maximum resource capabilities in the cluster as specified in
+ {@link GetNewApplicationResponse}.</p>
+
+ @param request request to get a new <code>ApplicationId</code>
+ @return response containing the new <code>ApplicationId</code> to be used
+ to submit an application
+ @throws YarnException
+ @throws IOException
+ @see #submitApplication(SubmitApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to submit a new application to the
+ <code>ResourceManager.</code></p>
+
+ <p>The client is required to provide details such as queue,
+ {@link Resource} required to run the <code>ApplicationMaster</code>,
+ the equivalent of {@link ContainerLaunchContext} for launching
+ the <code>ApplicationMaster</code> etc. via the
+ {@link SubmitApplicationRequest}.</p>
+
+ <p>Currently the <code>ResourceManager</code> sends an immediate (empty)
+ {@link SubmitApplicationResponse} on accepting the submission and throws
+ an exception if it rejects the submission. However, this call needs to be
+ followed by {@link #getApplicationReport(GetApplicationReportRequest)}
+ to make sure that the application gets properly submitted - obtaining a
+ {@link SubmitApplicationResponse} from ResourceManager doesn't guarantee
+ that RM 'remembers' this application beyond failover or restart. If RM
+ failover or RM restart happens before ResourceManager saves the
+ application's state successfully, the subsequent
+ {@link #getApplicationReport(GetApplicationReportRequest)} will throw
+ an {@link ApplicationNotFoundException}. The client needs to re-submit
+ the application with the same {@link ApplicationSubmissionContext} when
+ it encounters the {@link ApplicationNotFoundException} on the
+ {@link #getApplicationReport(GetApplicationReportRequest)} call.</p>
+
+ <p>During the submission process, it checks whether the application
+ already exists. If the application exists, it will simply return
+ SubmitApplicationResponse</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access to
+ queues etc. before accepting the application submission.</p>
+
+ @param request request to submit a new application
+ @return (empty) response on accepting the submission
+ @throws YarnException
+ @throws IOException
+ @throws InvalidResourceRequestException
+           The exception is thrown when a {@link ResourceRequest} is out of
+           the range of the configured lower and upper resource boundaries.
+ @see #getNewApplication(GetNewApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="forceKillApplication" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to request the
+ <code>ResourceManager</code> to abort submitted application.</p>
+
+ <p>The client, via {@link KillApplicationRequest} provides the
+ {@link ApplicationId} of the application to be aborted.</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access to the
+ application, queue etc. before terminating the application.</p>
+
+ <p>Currently, the <code>ResourceManager</code> returns an empty response
+ on success and throws an exception on rejecting the request.</p>
+
+ @param request request to abort a submitted application
+ @return <code>ResourceManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a report of an Application from
+ the <code>ResourceManager</code>.</p>
+
+ <p>The client, via {@link GetApplicationReportRequest} provides the
+ {@link ApplicationId} of the application.</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access to the
+ application, queue etc. before accepting the request.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetApplicationReportResponse} which includes the
+ {@link ApplicationReport} for the application.</p>
+
+ <p>If the user does not have <code>VIEW_APP</code> access then the
+ following fields in the report will be set to stubbed values:
+ <ul>
+   <li>host - set to "N/A"</li>
+   <li>RPC port - set to -1</li>
+   <li>client token - set to "N/A"</li>
+   <li>diagnostics - set to "N/A"</li>
+   <li>tracking URL - set to "N/A"</li>
+   <li>original tracking URL - set to "N/A"</li>
+   <li>resource usage report - all values are -1</li>
+ </ul></p>
+
+ @param request request for an application report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterMetrics" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get metrics about the cluster from
+ the <code>ResourceManager</code>.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetClusterMetricsResponse} which includes the
+ {@link YarnClusterMetrics} with details such as number of current
+ nodes in the cluster.</p>
+
+ @param request request for cluster metrics
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a report of Applications
+ matching the filters defined by {@link GetApplicationsRequest}
+ in the cluster from the <code>ResourceManager</code>.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetApplicationsResponse} which includes the
+ {@link ApplicationReport} for the applications.</p>
+
+ <p>If the user does not have <code>VIEW_APP</code> access for an
+ application then the corresponding report will be filtered as
+ described in {@link #getApplicationReport(GetApplicationReportRequest)}.
+ </p>
+
+ @param request request for report on applications
+ @return report on applications matching the given application types
+           defined in the request
+ @throws YarnException
+ @throws IOException
+ @see GetApplicationsRequest]]>
+      </doc>
+    </method>
+    <method name="getClusterNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a report of all nodes
+ in the cluster from the <code>ResourceManager</code>.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetClusterNodesResponse} which includes the
+ {@link NodeReport} for all the nodes in the cluster.</p>
+
+ @param request request for report on all nodes
+ @return report on all nodes
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queues</em>
+ from the <code>ResourceManager</code>.</p>
+
+ <p>The client, via {@link GetQueueInfoRequest}, can ask for details such
+ as used/total resources, child queues, running applications etc.</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access before
+ providing the information.</p>
+
+ @param request request to get queue information
+ @return queue information
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueUserAcls" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queue
+ acls</em> for the <em>current user</em> from the <code>ResourceManager</code>.
+ </p>
+
+ <p>The <code>ResourceManager</code> responds with queue acls for all
+ existing queues.</p>
+
+ @param request request to get queue acls for <em>current user</em>
+ @return queue acls for <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a delegation token, enabling the
+ containers to talk to the service using those tokens.
+
+  <p> The <code>ResourceManager</code> responds with the delegation
+  {@link Token} that can be used by the client to speak to this
+  service.
+ @param request request to get a delegation token for the client.
+ @return delegation token that can be used to talk to this service
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues" return="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move an application to a new queue.
+
+ @param request the application ID and the target queue
+ @return an empty response
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of an Application Attempt
+ from the <code>ResourceManager</code>
+ </p>
+
+ <p>
+ The client, via {@link GetApplicationAttemptReportRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+ </p>
+
+ <p>
+ In secure mode, the <code>ResourceManager</code> verifies access to
+ the method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with a
+ {@link GetApplicationAttemptReportResponse} which includes the
+ {@link ApplicationAttemptReport} for the application attempt.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host</li>
+ <li>RPC port</li>
+ <li>client token</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL</li>
+ </ul>
+ </p>
+
+ @param request
+          request for an application attempt report
+ @return application attempt report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of all Application attempts
+ in the cluster from the <code>ResourceManager</code>
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with a
+ {@link GetApplicationAttemptsResponse} which includes the
+ {@link ApplicationAttemptReport} for all the application attempts of a
+ specified application.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+ </p>
+
+ @param request
+          request for reports on all application attempts of an application
+ @return reports on all application attempts of an application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of a Container from the
+ <code>ResourceManager</code>
+ </p>
+
+ <p>
+ The client, via {@link GetContainerReportRequest} provides the
+ {@link ContainerId} of the container.
+ </p>
+
+ <p>
+ In secure mode, the <code>ResourceManager</code> verifies access to the
+ method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with a
+ {@link GetContainerReportResponse} which includes the
+ {@link ContainerReport} for the container.
+ </p>
+
+ @param request
+          request for a container report
+ @return container report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of Containers for an
+ application attempt from the <code>ResourceManager</code>
+ </p>
+
+ <p>
+ The client, via {@link GetContainersRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+ </p>
+
+ <p>
+ In secure mode, the <code>ResourceManager</code> verifies access to the
+ method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with a
+ {@link GetContainersResponse} which includes a list of
+ {@link ContainerReport} for all the containers of a specific application
+ attempt.
+ </p>
+
+ @param request
+          request for a list of container reports of an application attempt.
+ @return reports on all containers of an application attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and concurrency needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user's requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationSubmissionRequest is satisfiable, the
+ {@code ResourceManager} answers with a
+ {@link ReservationSubmissionResponse} that includes a non-null
+ {@link ReservationId}. Upon failure to find a valid allocation, the response
+ is an exception with the reason.
+
+ On application submission the client can use this {@link ReservationId} to
+ obtain access to the reserved resources.
+ </p>
+
+ <p>
+ The system guarantees that during the time-range specified by the user, the
+ reservationID will correspond to a valid reservation. The amount of
+ capacity dedicated to such a queue can vary over time, depending on the
+ allocation that has been determined, but it is guaranteed to satisfy all
+ the constraints expressed by the user in the
+ {@link ReservationSubmissionRequest}.
+ </p>
+
+ @param request the request to submit a new Reservation
+ @return response the {@link ReservationId} on accepting the submission
+ @throws YarnException if the request is invalid or reservation cannot be
+           created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user updates a
+ Reservation that was previously submitted.
+ </p>
+
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationUpdateRequest}. Upon success the previous allocation is
+ substituted by the new one, and on failure (i.e., if the system cannot find
+ a valid allocation for the updated request), the previous allocation
+ remains valid.
+
+ The {@link ReservationId} is not changed, and applications currently
+ running within this reservation will automatically receive the resources
+ based on the new allocation.
+ </p>
+
+ @param request to update an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+
+ Upon deletion of a reservation, applications running with this reservation
+ are automatically downgraded to normal jobs running without any dedicated
+ reservation.
+ </p>
+
+ @param request to remove an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeToLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node-to-labels mappings in the existing cluster
+ </p>
+
+ @param request
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node labels in the cluster
+ </p>
+
+ @param request to get node labels collection of this cluster
+ @return node labels collection of this cluster
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between clients and the <code>ResourceManager</code>
+ to submit/abort jobs and to get information on applications, cluster metrics,
+ nodes, queues and ACLs.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <interface name="ApplicationConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="APP_SUBMIT_TIME_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
+ only]]>
+      </doc>
+    </field>
+    <field name="CONTAINER_TOKEN_FILE_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The cache file into which container token is written]]>
+      </doc>
+    </field>
+    <field name="APPLICATION_WEB_PROXY_BASE_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for APPLICATION_WEB_PROXY_BASE. Set in the
+ ApplicationMaster's environment only. It specifies the base that all
+ non-relative web URLs in the ApplicationMaster's web UI should have.]]>
+      </doc>
+    </field>
+    <field name="LOG_DIR_EXPANSION_VAR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The temporary environment variable for the container log directory. It
+ is replaced by the real container log directory on container launch.]]>
+      </doc>
+    </field>
+    <field name="CLASS_PATH_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This constant is used to construct the class path and will be replaced
+ with the real class path separator (':' for Linux and ';' for Windows) by the
+ NodeManager on container launch. Users have to use this constant to construct
+ the class path if they want cross-platform behavior, i.e. submitting an
+ application from a Windows client to a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_LEFT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The following two constants are used for parameter expansion and will be
+ replaced with the real parameter expansion marker ('%' for Windows and '$' for
+ Linux) by the NodeManager on container launch. For example: {{VAR}} will be
+ replaced as $VAR on Linux, and %VAR% on Windows. Users have to use these
+ constants if they want cross-platform behavior, i.e. submitting an application
+ from a Windows client to a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_RIGHT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Users have to use this constant if they want cross-platform behavior,
+ i.e. submitting an application from a Windows client to a Linux/Unix server
+ or vice versa.]]>
+      </doc>
+    </field>
+    <field name="STDERR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STDOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_APP_ATTEMPTS_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for MAX_APP_ATTEMPTS. Set in AppMaster environment
+ only]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This is the API for applications, comprising the constants that YARN sets
+ up for the applications and the containers.
+
+ TODO: Investigate the semantics and security of each cross-boundary ref.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <!-- start class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
+  <class name="ApplicationConstants.Environment" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="key" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="$" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Expand the environment variable based on client OS environment variable
+ expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
+ <p>
+ Note: Use the $$() method for cross-platform behavior, i.e. when submitting an
+ application from a Windows client to a Linux/Unix server or vice versa.
+ </p>]]>
+      </doc>
+    </method>
+    <method name="$$" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Expand the environment variable in platform-agnostic syntax. The
+ parameter expansion marker "{{VAR}}" will be replaced with real parameter
+ expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
+ container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
+ and %VAR% on Windows.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Environment for Applications.
+
+ Some of the environment variables for applications are <em>final</em>
+ i.e. they cannot be modified by the applications.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <interface name="ApplicationHistoryProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of an Application from the
+ <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The client, via {@link GetApplicationReportRequest} provides the
+ {@link ApplicationId} of the application.
+ </p>
+
+ <p>
+ In secure mode, the <code>ApplicationHistoryServer</code> verifies access to
+ the application, queue etc. before accepting the request.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetApplicationReportResponse} which includes the
+ {@link ApplicationReport} for the application.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ </p>
+
+ @param request
+          request for an application report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of all Applications in the
+ cluster from the <code>ApplicationHistoryServer</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetApplicationsResponse} which includes a list of
+ {@link ApplicationReport} for all the applications.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(GetApplicationReportRequest)}.
+ </p>
+
+ @param request
+          request for reports on all the applications
+ @return report on applications matching the given application types defined
+         in the request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of an Application Attempt
+ from the <code>ApplicationHistoryServer</code>.
+ </p>
+
+ <p>
+ The client, via {@link GetApplicationAttemptReportRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+ </p>
+
+ <p>
+ In secure mode, the <code>ApplicationHistoryServer</code> verifies access to
+ the method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetApplicationAttemptReportResponse} which includes the
+ {@link ApplicationAttemptReport} for the application attempt.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host</li>
+ <li>RPC port</li>
+ <li>client token</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL</li>
+ </ul>
+ </p>
+
+ @param request
+          request for an application attempt report
+ @return application attempt report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of all Application attempts
+ in the cluster from the <code>ApplicationHistoryServer</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetApplicationAttemptsResponse} which includes the
+ {@link ApplicationAttemptReport} for all the application attempts of a
+ specified application.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+ </p>
+
+ @param request
+          request for reports on all application attempts of an application
+ @return reports on all application attempts of an application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of a Container from the
+ <code>ApplicationHistoryServer</code>.
+ </p>
+
+ <p>
+ The client, via {@link GetContainerReportRequest} provides the
+ {@link ContainerId} of the container.
+ </p>
+
+ <p>
+ In secure mode, the <code>ApplicationHistoryServer</code> verifies access to
+ the method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetContainerReportResponse} which includes the
+ {@link ContainerReport} for the container.
+ </p>
+
+ @param request
+          request for a container report
+ @return container report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a report of Containers for an
+ application attempt from the <code>ApplicationHistoryServer</code>.
+ </p>
+
+ <p>
+ The client, via {@link GetContainersRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+ </p>
+
+ <p>
+ In secure mode, the <code>ApplicationHistoryServer</code> verifies access to
+ the method before accepting the request.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with a
+ {@link GetContainersResponse} which includes a list of
+ {@link ContainerReport} for all the containers of a specific application
+ attempt.
+ </p>
+
+ @param request
+          request for a list of container reports of an application attempt.
+ @return reports on all containers of an application attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get a delegation token, enabling the
+ containers to talk to the service using those tokens.
+ </p>
+
+ <p>
+ The <code>ApplicationHistoryServer</code> responds with the delegation
+ token {@link Token} that can be used by the client to speak to this
+ service.
+ </p>
+
+ @param request
+          request to get a delegation token for the client.
+ @return delegation token that can be used to talk to this service
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The protocol between clients and the <code>ApplicationHistoryServer</code> to
+ get the information of completed applications etc.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
+  <interface name="ApplicationMasterProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by a new <code>ApplicationMaster</code> to register with
+ the <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> needs to provide details such as RPC
+ Port, HTTP tracking url etc. as specified in
+ {@link RegisterApplicationMasterRequest}.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with critical details such as
+ maximum resource capabilities in the cluster as specified in
+ {@link RegisterApplicationMasterResponse}.
+ </p>
+
+ @param request
+          registration request
+ @return registration response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+           The exception is thrown when an ApplicationMaster tries to
+           register more than once.
+ @see RegisterApplicationMasterRequest
+ @see RegisterApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by an <code>ApplicationMaster</code> to notify the
+ <code>ResourceManager</code> about its completion (success or failure).</p>
+
+ <p>The <code>ApplicationMaster</code> has to provide details such as
+ final state, diagnostics (in case of failures) etc. as specified in
+ {@link FinishApplicationMasterRequest}.</p>
+
+ <p>The <code>ResourceManager</code> responds with
+ {@link FinishApplicationMasterResponse}.</p>
+
+ @param request completion request
+ @return completion response
+ @throws YarnException
+ @throws IOException
+ @see FinishApplicationMasterRequest
+ @see FinishApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The main interface between an <code>ApplicationMaster</code> and the
+ <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> uses this interface to provide a list of
+ {@link ResourceRequest} and returns unused {@link Container} allocated to
+ it via {@link AllocateRequest}. Optionally, the
+ <code>ApplicationMaster</code> can also <em>blacklist</em> resources which
+ it doesn't want to use.
+ </p>
+
+ <p>
+ This also doubles up as a <em>heartbeat</em> to let the
+ <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
+ is alive. Thus, applications should periodically make this call to be kept
+ alive. The frequency depends on
+ {@link YarnConfiguration#RM_AM_EXPIRY_INTERVAL_MS} which defaults to
+ {@link YarnConfiguration#DEFAULT_RM_AM_EXPIRY_INTERVAL_MS}.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with a list of allocated
+ {@link Container}, status of completed containers and headroom information
+ for the application.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> can use the available headroom
+ (resources) to decide how to utilize allocated resources and make informed
+ decisions about future resource requests.
+ </p>
+
+ @param request
+          allocation request
+ @return allocation response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+           This exception is thrown when an ApplicationMaster calls allocate
+           without registering first.
+ @throws InvalidResourceBlacklistRequestException
+           This exception is thrown when an application provides an invalid
+           specification for blacklist of resources.
+ @throws InvalidResourceRequestException
+           This exception is thrown when a {@link ResourceRequest} is out of
+           the range of the configured lower and upper limits on the
+           resources.
+ @see AllocateRequest
+ @see AllocateResponse]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between a live instance of <code>ApplicationMaster</code>
+ and the <code>ResourceManager</code>.</p>
+
+ <p>This is used by the <code>ApplicationMaster</code> to register/unregister
+ and to request and obtain resources in the cluster from the
+ <code>ResourceManager</code>.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+  <interface name="ContainerManagementProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="startContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> provides a list of
+ {@link StartContainerRequest}s to a <code>NodeManager</code> to
+ <em>start</em> {@link Container}s allocated to it using this interface.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> has to provide details such as allocated
+ resource capability, security tokens (if enabled), command to be executed
+ to start the container, environment for the process, necessary
+ binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
+ the {@link StartContainerRequest}.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StartContainersResponse} which includes a list of successfully
+ launched {@link Container}s, a containerId-to-exception map for each
+ failed {@link StartContainerRequest} in which the exception indicates the
+ error for that container, and an allServicesMetaData map between the names
+ of auxiliary services and their corresponding meta-data. Note:
+ Non-container-specific exceptions will still be thrown by the API method
+ itself.
+ </p>
+ <p>
+ The <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the to-be-launched or launched containers.
+ </p>
+
+ @param request
+          request to start a list of containers
+ @return response including containerIds of all successfully launched
+         containers, a containerId-to-exception map for failed requests and
+         an allServicesMetaData map.
+ @throws YarnException
+ @throws IOException
+ @throws NMNotYetReadyException
+           This exception is thrown when NM starts from scratch but has not
+           yet connected with RM.]]>
+      </doc>
+    </method>
+    <method name="stopContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> requests a <code>NodeManager</code> to
+ <em>stop</em> a list of {@link Container}s allocated to it using this
+ interface.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> sends a {@link StopContainersRequest}
+ which includes the {@link ContainerId}s of the containers to be stopped.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StopContainersResponse} which includes a list of
+ {@link ContainerId}s of successfully stopped containers and a
+ containerId-to-exception map for each failed request in which the
+ exception indicates the error for that container. Note:
+ Non-container-specific exceptions will still be thrown by
+ the API method itself. The <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the containers.
+ </p>
+
+ @param request
+          request to stop a list of containers
+ @return response which includes a list of containerIds of successfully
+         stopped containers, a containerId-to-exception map for failed
+         requests.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatuses" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The API used by the <code>ApplicationMaster</code> to request the current
+ statuses of <code>Container</code>s from the <code>NodeManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> sends a
+ {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
+ of all containers whose statuses are needed.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> responds with
+ {@link GetContainerStatusesResponse} which includes a list of
+ {@link ContainerStatus} of the successfully queried containers and a
+ containerId-to-exception map for each failed request in which the exception
+ indicates the error for that container. Note: Non-container-specific
+ exceptions will still be thrown by the API method itself.
+ </p>
+
+ @param request
+          request to get <code>ContainerStatus</code>es of containers with
+          the specified <code>ContainerId</code>s
+ @return response containing the list of <code>ContainerStatus</code> of the
+         successfully queried containers and a containerId-to-exception map
+         for failed requests.
+
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between an <code>ApplicationMaster</code> and a
+ <code>NodeManager</code> to start/stop containers and to get status
+ of running containers.</p>
+
+ <p>If security is enabled the <code>NodeManager</code> verifies that the
+ <code>ApplicationMaster</code> has truly been allocated the container
+ by the <code>ResourceManager</code> and also verifies all interactions such
+ as stopping the container or obtaining status information for the container.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+</package>
+<package name="org.apache.hadoop.yarn.api.protocolrecords">
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <class name="AllocateRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <param name="increaseRequests" type="java.util.List"/>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>response id</em> used to track duplicate responses.
+ @return <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="setResponseId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+      <doc>
+      <![CDATA[Set the <em>response id</em> used to track duplicate responses.
+ @param id <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>current progress</em> of application.
+ @return <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="setProgress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progress" type="float"/>
+      <doc>
+      <![CDATA[Set the <em>current progress</em> of application
+ @param progress <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="getAskList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ResourceRequest</code> to update the
+ <code>ResourceManager</code> about the application's resource requirements.
+ @return the list of <code>ResourceRequest</code>
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="setAskList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set list of <code>ResourceRequest</code> to update the
+ <code>ResourceManager</code> about the application's resource requirements.
+ @param resourceRequests list of <code>ResourceRequest</code> to update the
+                        <code>ResourceManager</code> about the application's
+                        resource requirements
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="getReleaseList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ContainerId</code> of containers being
+ released by the <code>ApplicationMaster</code>.
+ @return list of <code>ContainerId</code> of containers being
+         released by the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setReleaseList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="releaseContainers" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of <code>ContainerId</code> of containers being
+ released by the <code>ApplicationMaster</code>
+ @param releaseContainers list of <code>ContainerId</code> of
+                          containers being released by the
+                          <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getResourceBlacklistRequest" return="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ResourceBlacklistRequest</code> being sent by the
+ <code>ApplicationMaster</code>.
+ @return the <code>ResourceBlacklistRequest</code> being sent by the
+         <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="setResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <doc>
+      <![CDATA[Set the <code>ResourceBlacklistRequest</code> to inform the
+ <code>ResourceManager</code> about the blacklist additions and removals
+ per the <code>ApplicationMaster</code>.
+
+ @param resourceBlacklistRequest the <code>ResourceBlacklistRequest</code>
+                         to inform the <code>ResourceManager</code> about
+                         the blacklist additions and removals
+                         per the <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="getIncreaseRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerResourceIncreaseRequest</code> being sent by the
+ <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setIncreaseRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="increaseRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerResourceIncreaseRequest</code> to inform the
+ <code>ResourceManager</code> about containers whose resources need to be
+ increased]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The core request sent by the <code>ApplicationMaster</code> to the
+ <code>ResourceManager</code> to obtain resources in the cluster.</p>
+
+ <p>The request includes:
+   <ul>
+     <li>A response id to track duplicate responses.</li>
+     <li>Progress information.</li>
+     <li>
+       A list of {@link ResourceRequest} to inform the
+       <code>ResourceManager</code> about the application's
+       resource requirements.
+     </li>
+     <li>
+       A list of unused {@link Container} which are being returned.
+     </li>
+   </ul>
+ </p>
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
+  <class name="AllocateResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+      <param name="increasedContainers" type="java.util.List"/>
+      <param name="decreasedContainers" type="java.util.List"/>
+    </method>
+    <method name="getAMCommand" return="org.apache.hadoop.yarn.api.records.AMCommand"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If the <code>ResourceManager</code> needs the
+ <code>ApplicationMaster</code> to take some action then it will send an
+ AMCommand to the <code>ApplicationMaster</code>. See <code>AMCommand</code>
+ for details on commands and actions for them.
+ @return <code>AMCommand</code> if the <code>ApplicationMaster</code> should
+         take action, <code>null</code> otherwise
+ @see AMCommand]]>
+      </doc>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last response id</em>.
+ @return <em>last response id</em>]]>
+      </doc>
+    </method>
+    <method name="getAllocatedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>newly allocated</em> <code>Container</code> by the
+ <code>ResourceManager</code>.
+ @return list of <em>newly allocated</em> <code>Container</code>]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>available headroom</em> for resources in the cluster for the
+ application.
+ @return limit of available headroom for resources in the cluster for the
+ application]]>
+      </doc>
+    </method>
+    <method name="getCompletedContainersStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>completed containers' statuses</em>.
+ @return the list of <em>completed containers' statuses</em>]]>
+      </doc>
+    </method>
+    <method name="getUpdatedNodes" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>updated <code>NodeReport</code>s</em>. Updates could
+ be changes in health, availability etc of the nodes.
+ @return The delta of updated nodes since the last response]]>
+      </doc>
+    </method>
+    <method name="getNumClusterNodes" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of hosts available on the cluster.
+ @return the available host count.]]>
+      </doc>
+    </method>
+    <method name="getPreemptionMessage" return="org.apache.hadoop.yarn.api.records.PreemptionMessage"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>Get the description of containers owned by the AM, but requested back by
+ the cluster. Note that the RM may have an inconsistent view of the
+ resources owned by the AM. These messages are advisory, and the AM may
+ elect to ignore them.</p>
+
+ <p>The message is a snapshot of the resources the RM wants back from the AM.
+ While demand persists, the RM will repeat its request; applications should
+ not interpret each message as a request for <em>additional</em>
+ resources on top of previous messages. Resources requested consistently
+ over some duration may be forcibly killed by the RM.</p>
+
+ @return A specification of the resources to reclaim from this AM.]]>
+      </doc>
+    </method>
+    <method name="getNMTokens" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>Get the list of NMTokens required for communicating with the NM. New
+ NMTokens are issued only if:</p>
+ <p>1) The AM is receiving its first container on the underlying NodeManager,<br>
+ OR<br>
+ 2) The NMToken master key rolled over in the ResourceManager and the AM is
+ getting a new container on the same underlying NodeManager.</p>
+ <p>The AM will receive one NMToken per NM irrespective of the number of
+ containers issued on the same NM. The AM is expected to store these tokens
+ until issued a new token for the same NM.</p>]]>
+      </doc>
+    </method>
+    <method name="getIncreasedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of newly increased containers by <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="getDecreasedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of newly decreased containers by <code>NodeManager</code>]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The AMRMToken that belongs to this attempt
+
+ @return The AMRMToken that belongs to this attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to the
+ <code>ApplicationMaster</code> during resource negotiation.</p>
+
+ <p>The response includes:
+   <ul>
+     <li>Response ID to track duplicate responses.</li>
+     <li>
+       An AMCommand sent by ResourceManager to let the <code>ApplicationMaster</code>
+       take some actions (resync, shutdown etc.).</li>
+     <li>A list of newly allocated {@link Container}.</li>
+     <li>A list of completed {@link Container}s' statuses.</li>
+     <li>
+       The available headroom for resources in the cluster for the
+       application.
+     </li>
+     <li>A list of nodes whose status has been updated.</li>
+     <li>The number of available nodes in a cluster.</li>
+     <li>A description of resources requested back by the cluster</li>
+     <li>AMRMToken, if AMRMToken has been rolled over</li>
+   </ul>
+ </p>
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <class name="ApplicationsRequestScope" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration that controls the scope of applications fetched]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest -->
+  <class name="FinishApplicationMasterRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FinishApplicationMasterRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="url" type="java.lang.String"/>
+    </method>
+    <method name="getFinalApplicationStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>final state</em> of the <code>ApplicationMaster</code>.
+ @return <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setFinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalState" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <doc>
+      <![CDATA[Set the <em>final state</em> of the <code>ApplicationMaster</code>
+ @param finalState <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>diagnostic information</em> on application failure.
+ @return <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="setDiagnostics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnostics" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set <em>diagnostic information</em> on application failure.
+ @param diagnostics <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking URL</em> for the <code>ApplicationMaster</code>.
+ If this URL contains a scheme then it will be used by the ResourceManager
+ web application proxy; otherwise the scheme will default to http.
+ @return <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>final tracking URL</em> for the <code>ApplicationMaster</code>.
+ This is the web-URL to which ResourceManager or web-application proxy will
+ redirect client/users once the application is finished and the
+ <code>ApplicationMaster</code> is gone.
+ <p>
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+ </p>
+ <p>
+ Empty, null, and "N/A" strings are all valid besides a real URL. In case a URL
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+ </p>
+
+ @param url
+          <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The finalization request sent by the <code>ApplicationMaster</code> to
+ inform the <code>ResourceManager</code> about its completion.</p>
+
+ <p>The final request includes details such as:
+   <ul>
+     <li>Final state of the <code>ApplicationMaster</code></li>
+     <li>
+       Diagnostic information in

<TRUNCATED>
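The jdiff file above is generated from the YARN protocol Javadoc, so the prose describes the raw protocol records: the ApplicationHistoryProtocol report calls, the ApplicationMasterProtocol register/allocate/finish cycle, the ContainerManagementProtocol start/stop/status calls, and the AllocateRequest/AllocateResponse records. As a rough, non-authoritative illustration of how that lifecycle is usually driven from application code, the sketch below uses the AMRMClient convenience library rather than the raw protocol; the class name, host name, resource sizes and single-container loop are invented for the example.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class SketchApplicationMaster {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    AMRMClient<ContainerRequest> rm = AMRMClient.createAMRMClient();
    rm.init(conf);
    rm.start();

    // ApplicationMasterProtocol#registerApplicationMaster, via the client library.
    rm.registerApplicationMaster("localhost", 0, "");

    // Ask for one 1 GB / 1 vcore container anywhere in the cluster.
    rm.addContainerRequest(new ContainerRequest(
        Resource.newInstance(1024, 1), null, null, Priority.newInstance(0)));

    int completed = 0;
    while (completed < 1) {
      // allocate() doubles as the AM heartbeat, so it must be called regularly.
      AllocateResponse response = rm.allocate(0.5f);
      List<Container> newContainers = response.getAllocatedContainers();
      List<ContainerStatus> finished = response.getCompletedContainersStatuses();
      completed += finished.size();
      // Launch work on newContainers here (see the NMClient sketch below).
      Thread.sleep(1000);
    }

    // ApplicationMasterProtocol#finishApplicationMaster.
    rm.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "done", "");
    rm.stop();
  }
}

A similarly hedged sketch of the container side (ContainerManagementProtocol) through the NMClient wrapper; allocatedContainer is assumed to come from AllocateResponse#getAllocatedContainers() and the launch command is a placeholder:

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.client.api.NMClient;

public class SketchContainerLauncher {
  static void runOn(Configuration conf, Container allocatedContainer) throws Exception {
    NMClient nm = NMClient.createNMClient();
    nm.init(conf);
    nm.start();

    // ContainerManagementProtocol#startContainers: the launch context carries the
    // command, environment and local resources described in the Javadoc above.
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    Map<String, String> env = new HashMap<String, String>();
    List<String> commands = Collections.singletonList("sleep 30");
    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
        localResources, env, commands, null, null, null);
    nm.startContainer(allocatedContainer, ctx);

    // ContainerManagementProtocol#getContainerStatuses.
    ContainerStatus status = nm.getContainerStatus(
        allocatedContainer.getId(), allocatedContainer.getNodeId());
    System.out.println("Container state: " + status.getState());

    // ContainerManagementProtocol#stopContainers.
    nm.stopContainer(allocatedContainer.getId(), allocatedContainer.getNodeId());
    nm.stop();
  }
}

The report queries described at the top of the file (getApplicationAttemptReport, getContainers and the related calls) are exposed to ordinary clients through YarnClient in the same style; a tiny sketch, assuming an ApplicationAttemptId obtained elsewhere:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class SketchHistoryQuery {
  static void describeAttempt(Configuration conf, ApplicationAttemptId attemptId)
      throws Exception {
    YarnClient yarn = YarnClient.createYarnClient();
    yarn.init(conf);
    yarn.start();
    ApplicationAttemptReport attempt = yarn.getApplicationAttemptReport(attemptId);
    List<ContainerReport> containers = yarn.getContainers(attemptId);
    System.out.println(attempt.getHost() + " ran " + containers.size() + " containers");
    yarn.stop();
  }
}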



[14/25] hadoop git commit: HADOOP-13249. RetryInvocationHandler need wrap InterruptedException in IOException when call Thread.sleep. Contributed by Zhihai Xu.

Posted by ae...@apache.org.
HADOOP-13249. RetryInvocationHandler need wrap InterruptedException in IOException when call Thread.sleep. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bbb4ddd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bbb4ddd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bbb4ddd

Branch: refs/heads/HDFS-1312
Commit: 0bbb4ddd793063c87927035969884a34f60f2076
Parents: 03fc6b1
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Jun 10 10:38:13 2016 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Jun 10 10:38:13 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/io/retry/RetryInvocationHandler.java  | 12 +++++++++++-
 .../java/org/apache/hadoop/io/retry/TestRetryProxy.java |  7 +++++--
 2 files changed, 16 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbb4ddd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index f2b2c99..5198c0d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -297,7 +298,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     log(method, isFailover, counters.failovers, retryInfo.delay, ex);
 
     if (retryInfo.delay > 0) {
-      Thread.sleep(retryInfo.delay);
+      try {
+        Thread.sleep(retryInfo.delay);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        LOG.warn("Interrupted while waiting to retry", e);
+        InterruptedIOException intIOE = new InterruptedIOException(
+            "Retry interrupted");
+        intIOE.initCause(e);
+        throw intIOE;
+      }
     }
 
     if (isFailover) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bbb4ddd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 41c1be4..649af89 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -31,6 +31,7 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.util.Collections;
 import java.util.Map;
@@ -320,7 +321,9 @@ public class TestRetryProxy {
     futureThread.get().interrupt();
     Throwable e = future.get(1, TimeUnit.SECONDS); // should return immediately 
     assertNotNull(e);
-    assertEquals(InterruptedException.class, e.getClass());
-    assertEquals("sleep interrupted", e.getMessage());
+    assertEquals(InterruptedIOException.class, e.getClass());
+    assertEquals("Retry interrupted", e.getMessage());
+    assertEquals(InterruptedException.class, e.getCause().getClass());
+    assertEquals("sleep interrupted", e.getCause().getMessage());
   }
 }
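For readers skimming the patch: the essence is the usual pattern for surfacing an interrupted retry sleep through an API that only declares IOException. A standalone, hypothetical helper (sleepBeforeRetry is not a method in the patch) showing the same steps:

import java.io.InterruptedIOException;

final class RetrySleep {
  private RetrySleep() {
  }

  // Hypothetical helper mirroring the pattern added to RetryInvocationHandler:
  // restore the thread's interrupt status, then rethrow as an IOException
  // subtype so callers that only handle IOException still see the interruption.
  static void sleepBeforeRetry(long delayMillis) throws InterruptedIOException {
    try {
      Thread.sleep(delayMillis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();  // keep the interrupt flag set
      InterruptedIOException ioe = new InterruptedIOException("Retry interrupted");
      ioe.initCause(e);
      throw ioe;
    }
  }
}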




[13/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03fc6b1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03fc6b1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03fc6b1b

Branch: refs/heads/HDFS-1312
Commit: 03fc6b1bb0f5c0844cd5477ffba43de8a14d4d60
Parents: 244506f
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Jun 10 09:51:09 2016 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Jun 10 09:51:09 2016 -0700

----------------------------------------------------------------------
 .../classification/tools/RootDocProcessor.java  |     4 +
 .../jdiff/Apache_Hadoop_YARN_API_2.6.0.xml      | 13076 ++++++++++++++++
 .../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml      | 13692 +++++++++++++++++
 .../jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml   |  2427 +++
 .../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml   |  2581 ++++
 .../jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml   |  2870 ++++
 .../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml   |  3323 ++++
 .../Apache_Hadoop_YARN_Server_Common_2.6.0.xml  |  2059 +++
 .../Apache_Hadoop_YARN_Server_Common_2.7.2.xml  |  1801 +++
 .../hadoop-yarn/dev-support/jdiff/Null.java     |    20 +
 .../hadoop-yarn/hadoop-yarn-api/pom.xml         |     2 +
 .../hadoop-yarn/hadoop-yarn-client/pom.xml      |     2 +
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |     2 +
 .../hadoop-yarn-server-common/pom.xml           |     2 +
 hadoop-yarn-project/hadoop-yarn/pom.xml         |   129 +
 15 files changed, 41990 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index 8042f17..60c2a6f 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -127,6 +127,10 @@ class RootDocProcessor {
               return filter(((ClassDoc) target).constructors(true),
                   ConstructorDoc.class);
             }
+          } else {
+            if (methodName.equals("methods")) {
+              return filter(((ClassDoc) target).methods(true), MethodDoc.class);
+            }
           }
         } else if (target instanceof PackageDoc) {
           if (methodName.equals("allClasses")) {




[08/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
new file mode 100644
index 0000000..9e37e8a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
@@ -0,0 +1,2870 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Wed Apr 08 11:30:04 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-common 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-common/2.6.0/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty
 -6.1.26.jar:/Users/llu/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-log4j12/1.7.5/slf4j-log4j12-1.7.5.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/c
 om/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-auth/2.6.0/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/
 repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/org/htrace/htrace-core/3.0.4/htrace-core-3.0.4.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.6.0.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/llu/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/javax/servlet/serv
 let-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repositor
 y/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-annotations/2.6.0/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/llu/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/llu/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/llu/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/r
 epository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -apidir /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname hadoop-yarn-common 2.6.0 -->
+<package name="org.apache.hadoop.yarn">
+  <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getTotalLogFileSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTotalLogFileSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logSize" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerLogAppender -->
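The ContainerLogAppender entry above only lists the log4j getter/setter pairs, so here is a minimal, hedged Java sketch of wiring the appender up programmatically. The log directory, layout pattern, and size value are placeholders invented for this example, and treating the size as a byte cap is an assumption rather than something stated by the API description.

import java.io.File;

import org.apache.hadoop.yarn.ContainerLogAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class ContainerLogAppenderSketch {
  public static void main(String[] args) {
    // Hypothetical directory for this sketch; the NodeManager normally supplies it.
    File logDir = new File(System.getProperty("java.io.tmpdir"), "container-logs");
    logDir.mkdirs();

    ContainerLogAppender appender = new ContainerLogAppender();
    appender.setContainerLogDir(logDir.getAbsolutePath());
    appender.setTotalLogFileSize(1024 * 1024);                    // assumed to be a byte cap
    appender.setLayout(new PatternLayout("%d{ISO8601} %p %c: %m%n"));
    appender.activateOptions();                                   // standard log4j activation step

    Logger log = Logger.getLogger(ContainerLogAppenderSketch.class);
    log.addAppender(appender);
    log.info("hello from a hypothetical container");
  }
}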
+  <!-- start class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <class name="ContainerRollingLogAppender" extends="org.apache.log4j.RollingFileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerRollingLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+  <class name="YarnUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="YarnUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.Thread"/>
+      <param name="e" type="java.lang.Throwable"/>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ in the main entry point.  It is intended to try to cleanly shut down
+ programs using the Yarn Event framework.
+
+ Note: Right now it will only shut down the program if an Error is caught, but
+ not any other exception.  Anything else is just logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
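The class description above says the handler is meant to be installed via Thread.setDefaultUncaughtExceptionHandler in a program's main entry point. A minimal sketch of that wiring might look like the following; the worker thread and the simulated Error are invented purely for illustration.

import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;

public class UncaughtHandlerSketch {
  public static void main(String[] args) throws InterruptedException {
    // Install the YARN handler before any worker threads are started.
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());

    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        // An Error thrown here reaches the handler, which may shut the JVM down;
        // ordinary exceptions are only logged, per the class description.
        throw new OutOfMemoryError("simulated error for illustration");
      }
    });
    worker.start();
    worker.join();
  }
}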
+</package>
+<package name="org.apache.hadoop.yarn.api">
+</package>
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.AHSProxy -->
+  <class name="AHSProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ahsAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.AHSProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <class name="ClientRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Client protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the token service name to be used for RMDelegationToken. Depending
+ on whether HA is enabled or not, this method generates the appropriate
+ service name as a comma-separated list of service addresses.
+
+ @param conf Configuration corresponding to the cluster we need the
+             RMDelegationToken for
+ @return - Service name for RMDelegationToken]]>
+      </doc>
+    </method>
+    <method name="getAMRMTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="address" type="java.lang.String"/>
+      <param name="defaultAddr" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ClientRMProxy -->
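Based on the createRMProxy description above, a client-side proxy to the ResourceManager might be created roughly as follows. ApplicationClientProtocol is used here as a typical client protocol; the RM address, retry, and security settings are assumed to come from the usual yarn-site.xml on the classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class ClientRMProxySketch {
  public static void main(String[] args) throws IOException, YarnException {
    Configuration conf = new YarnConfiguration();

    // Create a proxy for the standard client protocol; retry/HA behaviour is
    // picked up from the configuration.
    ApplicationClientProtocol rmClient =
        ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);

    GetNewApplicationResponse response =
        rmClient.getNewApplication(GetNewApplicationRequest.newInstance());
    System.out.println("New application id: " + response.getApplicationId());
  }
}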
+  <!-- start class org.apache.hadoop.yarn.client.NMProxy -->
+  <class name="NMProxy" extends="org.apache.hadoop.yarn.client.ServerProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
+  <class name="RMHAServiceTarget" extends="org.apache.hadoop.ha.HAServiceTarget"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMHAServiceTarget" type="org.apache.hadoop.yarn.conf.YarnConfiguration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getZKFCAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFencer" return="org.apache.hadoop.ha.NodeFencer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="checkFencingConfigured"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
+    </method>
+    <method name="isAutoFailoverEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
+  <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
+  <class name="RMProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMProxy"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated
+ This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.
+
+ @param conf Configuration to generate retry policy
+ @param protocol Protocol for the proxy
+ @param rmAddress Address of the ResourceManager
+ @param <T> Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ServerProxy -->
+  <class name="ServerProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServerProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxWaitTimeStr" type="java.lang.String"/>
+      <param name="defMaxWaitTime" type="long"/>
+      <param name="connectRetryIntervalStr" type="java.lang.String"/>
+      <param name="defRetryInterval" type="long"/>
+    </method>
+    <method name="createRetriableProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+      <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ServerProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <class name="TimelineClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineClient" return="org.apache.hadoop.yarn.client.api.TimelineClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+ </p>
+
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+ </p>
+
+ @param domain
+          a {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+ </p>
+
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+         timeline server
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Renew a timeline delegation token.
+ </p>
+
+ @param timelineDT
+          the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Cancel a timeline delegation token.
+ </p>
+
+ @param timelineDT
+          the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that can be used to post some information in terms of a
+ number of conceptual entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineClient -->
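As a rough illustration of the blocking putEntities call described above, a minimal, hedged sketch of posting one entity could look like this. The entity type, id, and configuration values are invented for the example; a real setup would also need the timeline service enabled and reachable.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();

    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(conf);    // AbstractService lifecycle
    client.start();
    try {
      TimelineEntity entity = new TimelineEntity();
      entity.setEntityType("EXAMPLE_ENTITY");          // hypothetical type
      entity.setEntityId("example-entity-1");          // hypothetical id
      entity.setStartTime(System.currentTimeMillis());

      // Blocking call: returns only after the timeline server responds.
      TimelinePutResponse response = client.putEntities(entity);
      System.out.println("Errors reported: " + response.getErrors().size());
    } finally {
      client.stop();
    }
  }
}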
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+</package>
+<package name="org.apache.hadoop.yarn.event">
+  <!-- start class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <class name="AbstractEvent" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Event"/>
+    <constructor name="AbstractEvent" type="TYPE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractEvent" type="TYPE, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Parent class of all the events. All events extend this class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <!-- start class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <class name="AsyncDispatcher" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Dispatcher"/>
+    <constructor name="AsyncDispatcher"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.util.concurrent.BlockingQueue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="setDrainEventsOnStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="dispatch"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.event.Event"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="eventDispatchers" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Dispatches {@link Event}s in a separate thread. Currently only a single thread
+ does that. Potentially there could be multiple channels for each event type
+ class, and a thread pool could be used to dispatch the events.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <interface name="Dispatcher"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <field name="DISPATCHER_EXIT_ON_ERROR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPATCHER_EXIT_ON_ERROR" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Event Dispatcher interface. It dispatches events to registered
+ event handlers based on event types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Event -->
+  <interface name="Event"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface defining events api.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Event -->
+  <!-- start interface org.apache.hadoop.yarn.event.EventHandler -->
+  <interface name="EventHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="handle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="T"/>
+    </method>
+    <doc>
+    <![CDATA[Interface for handling events of type T
+
+ @param <T> parameterized event of type T]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.EventHandler -->
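To make the dispatcher, event, and handler interfaces above concrete, here is a small, hedged sketch of the usual pattern: define an event-type enum and an AbstractEvent subclass, register a handler for the enum class, and feed events through the dispatcher's generic handler. The JobEventType enum and JobEvent class are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DispatcherSketch {

  // Hypothetical event type and event class for illustration.
  enum JobEventType { JOB_START, JOB_FINISH }

  static class JobEvent extends AbstractEvent<JobEventType> {
    JobEvent(JobEventType type) {
      super(type);
    }
  }

  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(new Configuration());
    dispatcher.start();

    // Route all JobEventType events to one handler.
    dispatcher.register(JobEventType.class, new EventHandler<JobEvent>() {
      @Override
      public void handle(JobEvent event) {
        System.out.println("handled " + event.getType());
      }
    });

    // Events are queued and dispatched on the dispatcher's own thread.
    dispatcher.getEventHandler().handle(new JobEvent(JobEventType.JOB_START));

    Thread.sleep(100);   // crude wait for the async dispatch in this sketch
    dispatcher.stop();
  }
}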
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <class name="AggregatedLogFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <class name="AggregatedLogFormat.LogKey" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="AggregatedLogFormat.LogKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+  <class name="AggregatedLogFormat.LogReader" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat.LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+
+ @return the application owner.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+
+ @return a map of the Application ACLs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="next" return="java.io.DataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+
+ @param key
+ @return the valueStream if there are more keys or null otherwise.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} to read logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} to read logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
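Following the LogReader methods above, a hedged sketch of dumping one aggregated log file might look like the following. The path argument is a placeholder for one node's aggregated log file; per the javadoc, readAContainerLogsForALogType is called repeatedly until it throws EOFException for each container.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;

public class AggregatedLogDumpSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder: path to one node's aggregated log file, passed on the command line.
    Path logFile = new Path(args[0]);

    AggregatedLogFormat.LogReader reader =
        new AggregatedLogFormat.LogReader(conf, logFile);
    try {
      System.out.println("Owner: " + reader.getApplicationOwner());

      AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey();
      DataInputStream valueStream = reader.next(key);
      while (valueStream != null) {
        System.out.println("=== Container " + key + " ===");
        try {
          while (true) {
            // Each call prints one log type; EOFException marks the end of this container.
            AggregatedLogFormat.LogReader.readAContainerLogsForALogType(
                valueStream, System.out);
          }
        } catch (EOFException eof) {
          // done with this container's logs
        }
        valueStream = reader.next(key);
      }
    } finally {
      reader.close();
    }
  }
}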
+  <!-- start class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
+  <class name="LogCLIHelpers" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="LogCLIHelpers"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
+  <class name="CommonNodeLabelsManager" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="handleStoreEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"/>
+    </method>
+    <method name="initDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="initNodeLabelStore"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="startDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="stopDispatcher"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="addToCluserNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add multiple node labels to repository
+
+ @param labels
+          new node labels added]]>
+      </doc>
+    </method>
+    <method name="checkAddLabelsToNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="addedLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="internalAddLabelsToNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="addedLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="addLabelsToNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addedLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[add more labels to nodes
+
+ @param addedLabelsToNode node -> labels map]]>
+      </doc>
+    </method>
+    <method name="checkRemoveFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="internalRemoveFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+    </method>
+    <method name="removeFromClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labelsToRemove" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove multiple node labels from repository
+
+ @param labelsToRemove
+          node labels to remove
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="checkRemoveLabelsFromNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="removeLabelsFromNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="internalRemoveLabelsFromNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="removeLabelsFromNode" type="java.util.Map"/>
+    </method>
+    <method name="removeLabelsFromNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="removeLabelsFromNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove labels from nodes; the labels being removed must be contained by these
+ nodes
+
+ @param removeLabelsFromNode node -> labels map]]>
+      </doc>
+    </method>
+    <method name="checkReplaceLabelsOnNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="replaceLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="internalReplaceLabelsOnNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="replaceLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="replaceLabelsOnNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="replaceLabelsToNode" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Replace labels on nodes
+
+ @param replaceLabelsToNode node -> labels map]]>
+      </doc>
+    </method>
+    <method name="getNodeLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get mapping of nodes to labels
+
+ @return nodes to labels map]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get existing valid labels in repository
+
+ @return existing valid labels in repository]]>
+      </doc>
+    </method>
+    <method name="normalizeLabel" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="label" type="java.lang.String"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+      <param name="checkRunning" type="boolean"/>
+    </method>
+    <method name="getLabelsByNode" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getLabelsByNode" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="createNodeIfNonExisted"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createHostIfNonExisted"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="hostName" type="java.lang.String"/>
+    </method>
+    <method name="normalizeNodeIdToLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="nodeIdToLabels" type="java.util.Map"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="EMPTY_STRING_SET" type="java.util.Set"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ANY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ACCESS_ANY_LABEL_SET" type="java.util.Set"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="WILDCARD_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NO_LABEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a user doesn't specify the label of a queue or node, it belongs to
+ DEFAULT_LABEL]]>
+      </doc>
+    </field>
+    <field name="dispatcher" type="org.apache.hadoop.yarn.event.Dispatcher"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="labelCollections" type="java.util.concurrent.ConcurrentMap"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeCollections" type="java.util.concurrent.ConcurrentMap"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="readLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="writeLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="store" type="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
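As a rough sketch of the label-management methods listed above (the addToCluserNodeLabels spelling comes from the API itself), the flow below adds cluster labels and then maps one onto a node. It assumes node-label support and a label store are already configured in the YarnConfiguration; the host name, port, and label names are invented for illustration.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;

public class NodeLabelsSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();

    CommonNodeLabelsManager mgr = new CommonNodeLabelsManager();
    mgr.init(conf);
    mgr.start();
    try {
      // Register the labels with the cluster first.
      mgr.addToCluserNodeLabels(new HashSet<String>(Arrays.asList("gpu", "ssd")));

      // Then attach one of them to a (hypothetical) node.
      NodeId node = NodeId.newInstance("host1.example.com", 0);
      mgr.addLabelsToNode(Collections.singletonMap(node, Collections.singleton("gpu")));

      Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
      System.out.println("Node labels: " + nodeToLabels);
    } finally {
      mgr.stop();
    }
  }
}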
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
+  <class name="CommonNodeLabelsManager.Host" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="protected"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager.Host"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="labels" type="java.util.Set"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="nms" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A <code>Host</code> can have multiple <code>Node</code>s]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Label -->
+  <class name="CommonNodeLabelsManager.Label" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="protected"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager.Label"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="resource" type="org.apache.hadoop.yarn.api.records.Resource"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Label -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
+  <class name="CommonNodeLabelsManager.Node" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="protected"
+    deprecated="not deprecated">
+    <constructor name="CommonNodeLabelsManager.Node"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="labels" type="java.util.Set"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="resource" type="org.apache.hadoop.yarn.api.records.Resource"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="running" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
+  <class name="FileSystemNodeLabelsStore" extends="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileSystemNodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="updateNodeToLabelsMappings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeToLabels" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="storeNewClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeClusterNodeLabels"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recover"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="MIRROR_FILENAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="EDITLOG_FILENAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
+  <class name="FileSystemNodeLabelsStore.SerializedLogType" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="protected"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
+  <class name="NodeLabelsStore" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <constructor name="NodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="updateNodeToLabelsMappings"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeToLabels" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Store node -> label]]>
+      </doc>
+    </method>
+    <method name="storeNewClusterNodeLabels"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="label" type="java.util.Set"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Store new labels]]>
+      </doc>
+    </method>
+    <method name="removeClusterNodeLabels"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Collection"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove labels]]>
+      </doc>
+    </method>
+    <method name="recover"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Recover labels and node to labels mappings from store
+ @param conf]]>
+      </doc>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getNodeLabelsManager" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="mgr" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
+  <class name="NodeLabelsStoreEvent" extends="org.apache.hadoop.yarn.event.AbstractEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeLabelsStoreEvent" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
+  <class name="NodeLabelsStoreEventType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
+  <class name="RemoveClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RemoveClusterNodeLabels" type="java.util.Collection"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLabels" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
+  <class name="StoreNewClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StoreNewClusterNodeLabels" type="java.util.Set"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLabels" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
+  <!-- start class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
+  <class name="UpdateNodeToLabelsMappingsEvent" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateNodeToLabelsMappingsEvent" type="java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
+</package>
+<package name="org.apache.hadoop.yarn.security">
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <class name="AMRMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[AMRMTokenIdentifier is the TokenIdentifier to be used by
+ ApplicationMasters to authenticate to the ResourceManager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <class name="AMRMTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="AMRMTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <class name="ContainerManagerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerManagerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <class name="ContainerTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getContainerID" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNmHostAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getExpiryTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMasterKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRMIdentifier" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the RMIdentifier of RM in which containers are allocated
+ @return RMIdentifier]]>
+      </doc>
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogAggregationContext" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TokenIdentifier for a container. Encodes {@link ContainerId},
+ {@link Resource} needed by the container and the target NMs host-address.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <class name="ContainerTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="ContainerTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <class name="NMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, org.apache.hadoop.yarn.api.records.NodeId, java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC/Secret manager]]>
+      </doc>
+    </constructor>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">

<TRUNCATED>
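
A minimal illustration of the store contract recorded in the jdiff XML above: the sketch below implements only the abstract methods listed for NodeLabelsStore, keeping everything in memory. The class name InMemoryNodeLabelsStore, the concrete Map<NodeId, Set<String>> generics, and the trivial method bodies are assumptions for illustration only; the shipped implementation is FileSystemNodeLabelsStore, which persists the same operations to the mirror and edit-log files named by MIRROR_FILENAME and EDITLOG_FILENAME.

import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.nodelabels.NodeLabelsStore;

/**
 * Sketch only: an in-memory NodeLabelsStore that satisfies the abstract
 * methods listed in the jdiff XML, with no persistence or recovery.
 */
public class InMemoryNodeLabelsStore extends NodeLabelsStore {

  private final Map<NodeId, Set<String>> nodeToLabels = new HashMap<>();
  private final Set<String> clusterLabels = new HashSet<>();

  public InMemoryNodeLabelsStore(CommonNodeLabelsManager mgr) {
    super(mgr);
  }

  @Override
  public void updateNodeToLabelsMappings(Map<NodeId, Set<String>> mappings)
      throws IOException {
    nodeToLabels.putAll(mappings);      // store node -> labels mappings
  }

  @Override
  public void storeNewClusterNodeLabels(Set<String> labels)
      throws IOException {
    clusterLabels.addAll(labels);       // store newly added cluster labels
  }

  @Override
  public void removeClusterNodeLabels(Collection<String> labels)
      throws IOException {
    clusterLabels.removeAll(labels);    // remove cluster labels
  }

  @Override
  public void recover() throws IOException {
    // Nothing is persisted, so there is nothing to replay after a restart.
  }

  @Override
  public void close() throws IOException {
    // No file or stream resources to release.
  }
}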



[04/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
new file mode 100644
index 0000000..7b00145
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Null {
+  public Null() { }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 187dbbb..41aef33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -30,6 +30,8 @@
   <properties>
     <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+    <should.run.jdiff>true</should.run.jdiff>
+    <dev-support.relative.dir>../dev-support</dev-support.relative.dir>
   </properties>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index d6ff6af..df15c7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -27,6 +27,8 @@
   <properties>
     <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+    <should.run.jdiff>true</should.run.jdiff>
+    <dev-support.relative.dir>../dev-support</dev-support.relative.dir>
   </properties>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f13d6ec..17fc6e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -30,6 +30,8 @@
   <properties>
     <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.basedir}</yarn.basedir>
+    <should.run.jdiff>true</should.run.jdiff>
+    <dev-support.relative.dir>../dev-support</dev-support.relative.dir>
   </properties>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index ad9f977..f792ccd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -30,6 +30,8 @@
   <properties>
     <!-- Needed for generating FindBugs warnings using parent pom -->
     <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+    <should.run.jdiff>true</should.run.jdiff>
+    <dev-support.relative.dir>../../dev-support</dev-support.relative.dir>
   </properties>
 
   <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index 0f79226..3e31ec0 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -29,6 +29,9 @@
     <test.logs>true</test.logs>
     <test.timeout>600000</test.timeout>
     <yarn.basedir>${basedir}</yarn.basedir>
+    <!-- Used by jdiff -->
+    <!-- Antrun cannot resolve yarn.basedir, so we need to setup something else -->
+    <dev-support.relative.dir>dev-support</dev-support.relative.dir>
     <hadoop.common.build.dir>${basedir}/../../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
   </properties>
 
@@ -53,6 +56,7 @@
           <excludes>
             <exclude>conf/slaves</exclude>
             <exclude>conf/container-executor.cfg</exclude>
+            <exclude>dev-support/jdiff/**</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -98,6 +102,131 @@
         </plugins>
       </build>
     </profile>
+
+    <profile>
+      <id>docs</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <properties>
+        <jdiff.stable.api>2.7.2</jdiff.stable.api>
+        <jdiff.stability>-unstable</jdiff.stability>
+        <jdiff.compatibility></jdiff.compatibility>
+        <jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
+      </properties>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-javadoc-plugin</artifactId>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>javadoc</goal>
+                </goals>
+                <phase>prepare-package</phase>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-dependency-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>site</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>copy</goal>
+                </goals>
+                <configuration>
+                  <artifactItems>
+                    <artifactItem>
+                      <groupId>jdiff</groupId>
+                      <artifactId>jdiff</artifactId>
+                      <version>${jdiff.version}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>jdiff.jar</destFileName>
+                    </artifactItem>
+                    <artifactItem>
+                      <groupId>org.apache.hadoop</groupId>
+                      <artifactId>hadoop-annotations</artifactId>
+                      <version>${project.version}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>hadoop-annotations.jar</destFileName>
+                    </artifactItem>
+                    <artifactItem>
+                      <groupId>xerces</groupId>
+                      <artifactId>xercesImpl</artifactId>
+                      <version>${xerces.version.jdiff}</version>
+                      <overWrite>false</overWrite>
+                      <outputDirectory>${project.build.directory}</outputDirectory>
+                      <destFileName>xerces.jar</destFileName>
+                    </artifactItem>
+                  </artifactItems>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>site</id>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+                <configuration>
+                  <target if="should.run.jdiff">
+
+                    <!-- Jdiff -->
+                    <mkdir dir="${project.build.directory}/site/jdiff/xml"/>
+
+                    <javadoc maxmemory="${jdiff.javadoc.maxmemory}" verbose="yes">
+                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
+                        <param name="-apidir" value="${project.build.directory}/site/jdiff/xml"/>
+                        <param name="-apiname" value="${project.name} ${project.version}"/>
+                        <param name="${jdiff.stability}"/>
+                      </doclet>
+                      <packageset dir="${basedir}/src/main/java"/>
+                      <classpath>
+                        <path refid="maven.compile.classpath"/>
+                      </classpath>
+                    </javadoc>
+                    <javadoc sourcepath="${basedir}/src/main/java"
+                      destdir="${project.build.directory}/site/jdiff/xml"
+                      sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
+                             maxmemory="${jdiff.javadoc.maxmemory}">
+                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                              path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
+                        <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
+                        <param name="-newapi" value="${project.name} ${project.version}"/>
+                        <param name="-oldapidir" value="${basedir}/${dev-support.relative.dir}/jdiff"/>
+                        <param name="-newapidir" value="${project.build.directory}/site/jdiff/xml"/>
+                        <param name="-javadocold"
+                               value="http://hadoop.apache.org/docs/r${jdiff.stable.api}/api/"/>
+                        <param name="-javadocnew" value="${project.build.directory}/site/apidocs/"/>
+                        <param name="-stats"/>
+                        <param name="${jdiff.stability}"/>
+                        <!--param name="${jdiff.compatibility}"/-->
+                      </doclet>
+                      <classpath>
+                        <path refid="maven.compile.classpath"/>
+                      </classpath>
+                    </javadoc>
+
+                  </target>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
   </profiles>
 
   <modules>




[19/25] hadoop git commit: YARN-5124. Modify AMRMClient to set the ExecutionType in the ResourceRequest. (asuresh)

Posted by ae...@apache.org.
YARN-5124. Modify AMRMClient to set the ExecutionType in the ResourceRequest. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51432779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51432779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51432779

Branch: refs/heads/HDFS-1312
Commit: 51432779588fdd741b4840601f5db637ec783d92
Parents: 5279af7
Author: Arun Suresh <as...@apache.org>
Authored: Sun Jun 12 09:42:38 2016 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Sun Jun 12 09:42:38 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/client/api/AMRMClient.java      |  43 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  17 +
 .../yarn/client/api/impl/AMRMClientImpl.java    | 294 +++------
 .../client/api/impl/RemoteRequestsTable.java    | 332 ++++++++++
 .../client/api/impl/BaseAMRMProxyE2ETest.java   | 197 ++++++
 .../yarn/client/api/impl/TestAMRMClient.java    |  26 +-
 .../impl/TestAMRMClientContainerRequest.java    |  54 +-
 .../yarn/client/api/impl/TestAMRMProxy.java     | 171 +----
 .../api/impl/TestDistributedScheduling.java     | 644 ++++++++++++-------
 .../yarn/client/api/impl/TestNMClient.java      |   7 +-
 .../records/impl/pb/ResourceRequestPBImpl.java  |   5 +-
 11 files changed, 1204 insertions(+), 586 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 3ec0899..5f362c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -109,7 +110,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
     final Priority priority;
     final boolean relaxLocality;
     final String nodeLabelsExpression;
-    final ExecutionType executionType;
+    final ExecutionTypeRequest executionTypeRequest;
     
     /**
      * Instantiates a {@link ContainerRequest} with the given constraints and
@@ -180,7 +181,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
         Priority priority, boolean relaxLocality, String nodeLabelsExpression) {
       this(capability, nodes, racks, priority, relaxLocality,
           nodeLabelsExpression,
-          ExecutionType.GUARANTEED);
+          ExecutionTypeRequest.newInstance());
     }
           
     /**
@@ -203,12 +204,12 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
      * @param nodeLabelsExpression
      *          Set node labels to allocate resource, now we only support
      *          asking for only a single node label
-     * @param executionType
+     * @param executionTypeRequest
      *          Set the execution type of the container request.
      */
     public ContainerRequest(Resource capability, String[] nodes, String[] racks,
         Priority priority, boolean relaxLocality, String nodeLabelsExpression,
-        ExecutionType executionType) {
+        ExecutionTypeRequest executionTypeRequest) {
       // Validate request
       Preconditions.checkArgument(capability != null,
           "The Resource to be requested for each container " +
@@ -226,7 +227,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
       this.priority = priority;
       this.relaxLocality = relaxLocality;
       this.nodeLabelsExpression = nodeLabelsExpression;
-      this.executionType = executionType;
+      this.executionTypeRequest = executionTypeRequest;
     }
     
     public Resource getCapability() {
@@ -253,15 +254,16 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
       return nodeLabelsExpression;
     }
     
-    public ExecutionType getExecutionType() {
-      return executionType;
+    public ExecutionTypeRequest getExecutionTypeRequest() {
+      return executionTypeRequest;
     }
 
     public String toString() {
       StringBuilder sb = new StringBuilder();
       sb.append("Capability[").append(capability).append("]");
       sb.append("Priority[").append(priority).append("]");
-      sb.append("ExecutionType[").append(executionType).append("]");
+      sb.append("ExecutionTypeRequest[").append(executionTypeRequest)
+          .append("]");
       return sb.toString();
     }
   }
@@ -388,10 +390,35 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
    * collection, requests will be returned in the same order as they were added.
    * @return Collection of request matching the parameters
    */
+  @InterfaceStability.Evolving
   public abstract List<? extends Collection<T>> getMatchingRequests(
                                            Priority priority, 
                                            String resourceName, 
                                            Resource capability);
+
+  /**
+   * Get outstanding <code>ContainerRequest</code>s matching the given
+   * parameters. These ContainerRequests should have been added via
+   * <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+   * the AMRMClient may return its internal collection directly without creating
+   * a copy. Users should not perform mutable operations on the return value.
+   * Each collection in the list contains requests with identical
+   * <code>Resource</code> size that fit in the given capability. In a
+   * collection, requests will be returned in the same order as they were added.
+   * specify an <code>ExecutionType</code> .
+   * @param priority Priority
+   * @param resourceName Location
+   * @param executionType ExecutionType
+   * @param capability Capability
+   * @return Collection of request matching the parameters
+   */
+  @InterfaceStability.Evolving
+  public List<? extends Collection<T>> getMatchingRequests(
+      Priority priority, String resourceName, ExecutionType executionType,
+      Resource capability) {
+    throw new UnsupportedOperationException("The sub-class extending" +
+        " AMRMClient is expected to implement this !!");
+  }
   
   /**
    * Update application's blacklist with addition or removal resources.
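
For a concrete picture of the new surface, here is a short sketch of an ApplicationMaster using the ExecutionTypeRequest-aware ContainerRequest constructor and the new getMatchingRequests overload introduced by this patch. The scaffolding class, the 1024 MB / 1 vcore sizing, and the omitted init/start/registerApplicationMaster calls are illustrative; only the constructor and the overload shown in the diff come from the patch itself.

import java.util.Collection;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class ExecutionTypeRequestSketch {
  public static void main(String[] args) throws Exception {
    AMRMClient<ContainerRequest> amrmClient = AMRMClient.createAMRMClient();
    // init(conf), start() and registerApplicationMaster(...) omitted here.

    Resource capability = Resource.newInstance(1024, 1);
    Priority priority = Priority.newInstance(1);

    // The seven-argument constructor now takes an ExecutionTypeRequest;
    // ExecutionTypeRequest.newInstance() defaults to GUARANTEED.
    ContainerRequest request = new ContainerRequest(capability,
        null /* nodes */, null /* racks */, priority,
        true /* relaxLocality */, null /* nodeLabelsExpression */,
        ExecutionTypeRequest.newInstance());
    amrmClient.addContainerRequest(request);

    // The new overload filters outstanding requests by ExecutionType too.
    List<? extends Collection<ContainerRequest>> matching =
        amrmClient.getMatchingRequests(priority, ResourceRequest.ANY,
            ExecutionType.GUARANTEED, capability);
    System.out.println("Matching request groups: " + matching.size());
  }
}

Because the lookup uses canFit over a reverse-sorted capability map, it also returns request groups whose Resource is smaller than the queried capability, as the javadoc above notes.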

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index 3c8f923..2f95156 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -196,6 +197,22 @@ extends AbstractService {
                                                    Priority priority, 
                                                    String resourceName, 
                                                    Resource capability);
+
+  /**
+   * Returns all matching ContainerRequests that match the given Priority,
+   * ResourceName, ExecutionType and Capability.
+   * @param priority Priority.
+   * @param resourceName Location.
+   * @param executionType ExecutionType.
+   * @param capability Capability.
+   * @return All matching ContainerRequests
+   */
+  public List<? extends Collection<T>> getMatchingRequests(
+      Priority priority, String resourceName, ExecutionType executionType,
+      Resource capability) {
+    return client.getMatchingRequests(priority, resourceName,
+        executionType, capability);
+  }
   
   /**
    * Registers this application master with the resource manager. On successful

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 4366c25..4145944 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -19,19 +19,19 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
+import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.AbstractMap.SimpleEntry;
 
@@ -54,6 +54,8 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerResourceChangeRequest;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -102,7 +104,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected final Set<String> blacklistAdditions = new HashSet<String>();
   protected final Set<String> blacklistRemovals = new HashSet<String>();
   
-  class ResourceRequestInfo {
+  static class ResourceRequestInfo<T> {
     ResourceRequest remoteRequest;
     LinkedHashSet<T> containerRequests;
     
@@ -115,11 +117,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     }
   }
 
-
   /**
    * Class compares Resource by memory then cpu in reverse order
    */
-  class ResourceReverseMemoryThenCpuComparator implements Comparator<Resource> {
+  static class ResourceReverseMemoryThenCpuComparator implements
+      Comparator<Resource>, Serializable {
+    static final long serialVersionUID = 12345L;
     @Override
     public int compare(Resource arg0, Resource arg1) {
       long mem0 = arg0.getMemorySize();
@@ -141,7 +144,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
       return -1;
     }    
   }
-  
+
   static boolean canFit(Resource arg0, Resource arg1) {
     long mem0 = arg0.getMemorySize();
     long mem1 = arg1.getMemorySize();
@@ -150,17 +153,8 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     
     return (mem0 <= mem1 && cpu0 <= cpu1);
   }
-  
-  //Key -> Priority
-  //Value -> Map
-  //Key->ResourceName (e.g., nodename, rackname, *)
-  //Value->Map
-  //Key->Resource Capability
-  //Value->ResourceRequest
-  protected final 
-  Map<Priority, Map<String, TreeMap<Resource, ResourceRequestInfo>>>
-    remoteRequestsTable =
-    new TreeMap<Priority, Map<String, TreeMap<Resource, ResourceRequestInfo>>>();
+
+  final RemoteRequestsTable remoteRequestsTable = new RemoteRequestsTable<T>();
 
   protected final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
       new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator());
@@ -185,6 +179,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     super(AMRMClientImpl.class.getName());
   }
 
+  @VisibleForTesting
+  AMRMClientImpl(ApplicationMasterProtocol protocol) {
+    super(AMRMClientImpl.class.getName());
+    this.rmClient = protocol;
+  }
+
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     RackResolver.init(conf);
@@ -195,8 +195,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected void serviceStart() throws Exception {
     final YarnConfiguration conf = new YarnConfiguration(getConfig());
     try {
-      rmClient =
-          ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+      if (rmClient == null) {
+        rmClient = ClientRMProxy.createRMProxy(
+            conf, ApplicationMasterProtocol.class);
+      }
     } catch (IOException e) {
       throw new YarnRuntimeException(e);
     }
@@ -263,7 +265,8 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
           // RPC layer is using it to send info across
           askList.add(ResourceRequest.newInstance(r.getPriority(),
               r.getResourceName(), r.getCapability(), r.getNumContainers(),
-              r.getRelaxLocality(), r.getNodeLabelExpression()));
+              r.getRelaxLocality(), r.getNodeLabelExpression(),
+              r.getExecutionTypeRequest()));
         }
         List<ContainerResourceChangeRequest> increaseList = new ArrayList<>();
         List<ContainerResourceChangeRequest> decreaseList = new ArrayList<>();
@@ -315,13 +318,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
         synchronized (this) {
           release.addAll(this.pendingRelease);
           blacklistAdditions.addAll(this.blacklistedNodes);
-          for (Map<String, TreeMap<Resource, ResourceRequestInfo>> rr : remoteRequestsTable
-            .values()) {
-            for (Map<Resource, ResourceRequestInfo> capabalities : rr.values()) {
-              for (ResourceRequestInfo request : capabalities.values()) {
-                addResourceRequestToAsk(request.remoteRequest);
-              }
-            }
+          @SuppressWarnings("unchecked")
+          Iterator<ResourceRequestInfo<T>> reqIter =
+              remoteRequestsTable.iterator();
+          while (reqIter.hasNext()) {
+            addResourceRequestToAsk(reqIter.next().remoteRequest);
           }
           change.putAll(this.pendingChange);
         }
@@ -517,26 +518,28 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
             + joiner.join(req.getNodes()));        
       }
       for (String node : dedupedNodes) {
-        addResourceRequest(req.getPriority(), node, req.getCapability(), req,
-            true, req.getNodeLabelExpression());
+        addResourceRequest(req.getPriority(), node,
+            req.getExecutionTypeRequest(), req.getCapability(), req, true,
+            req.getNodeLabelExpression());
       }
     }
 
     for (String rack : dedupedRacks) {
-      addResourceRequest(req.getPriority(), rack, req.getCapability(), req,
-          true, req.getNodeLabelExpression());
+      addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(),
+          req.getCapability(), req, true, req.getNodeLabelExpression());
     }
 
     // Ensure node requests are accompanied by requests for
     // corresponding rack
     for (String rack : inferredRacks) {
-      addResourceRequest(req.getPriority(), rack, req.getCapability(), req,
-          req.getRelaxLocality(), req.getNodeLabelExpression());
+      addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(),
+          req.getCapability(), req, req.getRelaxLocality(),
+          req.getNodeLabelExpression());
     }
-
     // Off-switch
-    addResourceRequest(req.getPriority(), ResourceRequest.ANY, 
-        req.getCapability(), req, req.getRelaxLocality(), req.getNodeLabelExpression());
+    addResourceRequest(req.getPriority(), ResourceRequest.ANY,
+        req.getExecutionTypeRequest(), req.getCapability(), req,
+        req.getRelaxLocality(), req.getNodeLabelExpression());
   }
 
   @Override
@@ -552,16 +555,18 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     // Update resource requests
     if (req.getNodes() != null) {
       for (String node : new HashSet<String>(req.getNodes())) {
-        decResourceRequest(req.getPriority(), node, req.getCapability(), req);
+        decResourceRequest(req.getPriority(), node,
+            req.getExecutionTypeRequest(), req.getCapability(), req);
       }
     }
 
     for (String rack : allRacks) {
-      decResourceRequest(req.getPriority(), rack, req.getCapability(), req);
+      decResourceRequest(req.getPriority(), rack,
+          req.getExecutionTypeRequest(), req.getCapability(), req);
     }
 
     decResourceRequest(req.getPriority(), ResourceRequest.ANY,
-        req.getCapability(), req);
+        req.getExecutionTypeRequest(), req.getCapability(), req);
   }
 
   @Override
@@ -601,47 +606,38 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   public synchronized int getClusterNodeCount() {
     return clusterNodeCount;
   }
-  
+
+  @Override
+  public synchronized List<? extends Collection<T>> getMatchingRequests(
+      Priority priority,
+      String resourceName,
+      Resource capability) {
+    return getMatchingRequests(priority, resourceName,
+        ExecutionType.GUARANTEED, capability);
+  }
+
   @Override
   public synchronized List<? extends Collection<T>> getMatchingRequests(
-                                          Priority priority, 
-                                          String resourceName, 
-                                          Resource capability) {
+      Priority priority, String resourceName, ExecutionType executionType,
+      Resource capability) {
     Preconditions.checkArgument(capability != null,
         "The Resource to be requested should not be null ");
     Preconditions.checkArgument(priority != null,
         "The priority at which to request containers should not be null ");
     List<LinkedHashSet<T>> list = new LinkedList<LinkedHashSet<T>>();
-    Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = 
-        this.remoteRequestsTable.get(priority);
-    if (remoteRequests == null) {
-      return list;
-    }
-    TreeMap<Resource, ResourceRequestInfo> reqMap = remoteRequests
-        .get(resourceName);
-    if (reqMap == null) {
-      return list;
-    }
 
-    ResourceRequestInfo resourceRequestInfo = reqMap.get(capability);
-    if (resourceRequestInfo != null &&
-        !resourceRequestInfo.containerRequests.isEmpty()) {
-      list.add(resourceRequestInfo.containerRequests);
-      return list;
-    }
-    
-    // no exact match. Container may be larger than what was requested.
-    // get all resources <= capability. map is reverse sorted. 
-    SortedMap<Resource, ResourceRequestInfo> tailMap = 
-                                                  reqMap.tailMap(capability);
-    for(Map.Entry<Resource, ResourceRequestInfo> entry : tailMap.entrySet()) {
-      if (canFit(entry.getKey(), capability) &&
-          !entry.getValue().containerRequests.isEmpty()) {
-        // match found that fits in the larger resource
-        list.add(entry.getValue().containerRequests);
+    @SuppressWarnings("unchecked")
+    List<ResourceRequestInfo<T>> matchingRequests =
+        this.remoteRequestsTable.getMatchingRequests(priority, resourceName,
+            executionType, capability);
+    // If no exact match. Container may be larger than what was requested.
+    // get all resources <= capability. map is reverse sorted.
+    for (ResourceRequestInfo<T> resReqInfo : matchingRequests) {
+      if (canFit(resReqInfo.remoteRequest.getCapability(), capability) &&
+        !resReqInfo.containerRequests.isEmpty()) {
+        list.add(resReqInfo.containerRequests);
       }
     }
-    
     // no match found
     return list;          
   }
@@ -663,34 +659,30 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     
     return racks;
   }
-  
+
   /**
    * ContainerRequests with locality relaxation cannot be made at the same
    * priority as ContainerRequests without locality relaxation.
    */
   private void checkLocalityRelaxationConflict(Priority priority,
       Collection<String> locations, boolean relaxLocality) {
-    Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests =
-        this.remoteRequestsTable.get(priority);
-    if (remoteRequests == null) {
-      return;
-    }
     // Locality relaxation will be set to relaxLocality for all implicitly
     // requested racks. Make sure that existing rack requests match this.
-    for (String location : locations) {
-        TreeMap<Resource, ResourceRequestInfo> reqs =
-            remoteRequests.get(location);
-        if (reqs != null && !reqs.isEmpty()) {
-          boolean existingRelaxLocality =
-              reqs.values().iterator().next().remoteRequest.getRelaxLocality();
-          if (relaxLocality != existingRelaxLocality) {
-            throw new InvalidContainerRequestException("Cannot submit a "
-                + "ContainerRequest asking for location " + location
-                + " with locality relaxation " + relaxLocality + " when it has "
-                + "already been requested with locality relaxation " + existingRelaxLocality);
-          }
-        }
+
+    @SuppressWarnings("unchecked")
+    List<ResourceRequestInfo> allCapabilityMaps =
+        remoteRequestsTable.getAllResourceRequestInfos(priority, locations);
+    for (ResourceRequestInfo reqs : allCapabilityMaps) {
+      ResourceRequest remoteRequest = reqs.remoteRequest;
+      boolean existingRelaxLocality = remoteRequest.getRelaxLocality();
+      if (relaxLocality != existingRelaxLocality) {
+        throw new InvalidContainerRequestException("Cannot submit a "
+            + "ContainerRequest asking for location "
+            + remoteRequest.getResourceName() + " with locality relaxation "
+            + relaxLocality + " when it has already been requested"
+            + " with locality relaxation " + existingRelaxLocality);
       }
+    }
   }
   
   /**
@@ -747,46 +739,13 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     ask.add(remoteRequest);
   }
 
-  private void
-      addResourceRequest(Priority priority, String resourceName,
-          Resource capability, T req, boolean relaxLocality,
-          String labelExpression) {
-    Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests =
-      this.remoteRequestsTable.get(priority);
-    if (remoteRequests == null) {
-      remoteRequests = 
-          new HashMap<String, TreeMap<Resource, ResourceRequestInfo>>();
-      this.remoteRequestsTable.put(priority, remoteRequests);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + priority);
-      }
-    }
-    TreeMap<Resource, ResourceRequestInfo> reqMap = 
-                                          remoteRequests.get(resourceName);
-    if (reqMap == null) {
-      // capabilities are stored in reverse sorted order. smallest last.
-      reqMap = new TreeMap<Resource, ResourceRequestInfo>(
-          new ResourceReverseMemoryThenCpuComparator());
-      remoteRequests.put(resourceName, reqMap);
-    }
-    ResourceRequestInfo resourceRequestInfo = reqMap.get(capability);
-    if (resourceRequestInfo == null) {
-      resourceRequestInfo =
-          new ResourceRequestInfo(priority, resourceName, capability,
-              relaxLocality);
-      reqMap.put(capability, resourceRequestInfo);
-    }
-    
-    resourceRequestInfo.remoteRequest.setNumContainers(
-         resourceRequestInfo.remoteRequest.getNumContainers() + 1);
-
-    if (relaxLocality) {
-      resourceRequestInfo.containerRequests.add(req);
-    }
-    
-    if (ResourceRequest.ANY.equals(resourceName)) {
-      resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression);
-    }
+  private void addResourceRequest(Priority priority, String resourceName,
+      ExecutionTypeRequest execTypeReq, Resource capability, T req,
+      boolean relaxLocality, String labelExpression) {
+    @SuppressWarnings("unchecked")
+    ResourceRequestInfo resourceRequestInfo = remoteRequestsTable
+        .addResourceRequest(priority, resourceName,
+        execTypeReq, capability, req, relaxLocality, labelExpression);
 
     // Note this down for next interaction with ResourceManager
     addResourceRequestToAsk(resourceRequestInfo.remoteRequest);
@@ -800,70 +759,31 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     }
   }
 
-  private void decResourceRequest(Priority priority, 
-                                   String resourceName,
-                                   Resource capability, 
-                                   T req) {
-    Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests =
-      this.remoteRequestsTable.get(priority);
-    
-    if(remoteRequests == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as priority " + priority 
-            + " is not present in request table");
-      }
-      return;
-    }
-    
-    Map<Resource, ResourceRequestInfo> reqMap = remoteRequests.get(resourceName);
-    if (reqMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as " + resourceName
-            + " is not present in request table");
-      }
-      return;
-    }
-    ResourceRequestInfo resourceRequestInfo = reqMap.get(capability);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
-          + " priority=" + priority.getPriority()
-          + " resourceName=" + resourceName + " numContainers="
-          + resourceRequestInfo.remoteRequest.getNumContainers() 
-          + " #asks=" + ask.size());
-    }
-
-    resourceRequestInfo.remoteRequest.setNumContainers(
-        resourceRequestInfo.remoteRequest.getNumContainers() - 1);
-
-    resourceRequestInfo.containerRequests.remove(req);
-    
-    if(resourceRequestInfo.remoteRequest.getNumContainers() < 0) {
-      // guard against spurious removals
-      resourceRequestInfo.remoteRequest.setNumContainers(0);
-    }
+  private void decResourceRequest(Priority priority, String resourceName,
+      ExecutionTypeRequest execTypeReq, Resource capability, T req) {
+    @SuppressWarnings("unchecked")
+    ResourceRequestInfo resourceRequestInfo =
+        remoteRequestsTable.decResourceRequest(priority, resourceName,
+            execTypeReq, capability, req);
     // send the ResourceRequest to RM even if it is 0 because it needs to override
     // a previously sent value. If the ResourceRequest was not sent previously then
     // sending 0 ought to be a no-op on RM
-    addResourceRequestToAsk(resourceRequestInfo.remoteRequest);
+    if (resourceRequestInfo != null) {
+      addResourceRequestToAsk(resourceRequestInfo.remoteRequest);
 
-    // delete entries from map if no longer needed
-    if (resourceRequestInfo.remoteRequest.getNumContainers() == 0) {
-      reqMap.remove(capability);
-      if (reqMap.size() == 0) {
-        remoteRequests.remove(resourceName);
+      // delete entry from map if no longer needed
+      if (resourceRequestInfo.remoteRequest.getNumContainers() == 0) {
+        this.remoteRequestsTable.remove(priority, resourceName,
+            execTypeReq.getExecutionType(), capability);
       }
-      if (remoteRequests.size() == 0) {
-        remoteRequestsTable.remove(priority);
-      }
-    }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("AFTER decResourceRequest:" + " applicationId="
-          + " priority=" + priority.getPriority()
-          + " resourceName=" + resourceName + " numContainers="
-          + resourceRequestInfo.remoteRequest.getNumContainers() 
-          + " #asks=" + ask.size());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("AFTER decResourceRequest:" + " applicationId="
+            + " priority=" + priority.getPriority()
+            + " resourceName=" + resourceName + " numContainers="
+            + resourceRequestInfo.remoteRequest.getNumContainers()
+            + " #asks=" + ask.size());
+      }
     }
   }
 

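A quick illustration of what the refactored client code above enables: addContainerRequest, removeContainerRequest and getMatchingRequests are now keyed by ExecutionType in addition to priority, resource name and capability, with the bookkeeping delegated to the new RemoteRequestsTable. The sketch below shows how an application master might make and later match an OPPORTUNISTIC ask; it is illustrative only (the RM registration and allocate loop are omitted) and assumes the ExecutionType-aware getMatchingRequests overload is also exposed on the abstract AMRMClient, as the @Override in the diff suggests.

    import java.util.Collection;
    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ExecutionType;
    import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.client.api.AMRMClient;
    import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

    public class OpportunisticAskSketch {
      public static void main(String[] args) {
        // init()/start()/registerApplicationMaster() against a real RM are
        // omitted; this sketch only shows the request bookkeeping calls.
        AMRMClient<ContainerRequest> amClient = AMRMClient.createAMRMClient();

        Priority priority = Priority.newInstance(1);
        Resource capability = Resource.newInstance(1024, 1);

        // One OPPORTUNISTIC ask anywhere in the cluster.
        ContainerRequest ask = new ContainerRequest(capability, null, null,
            priority, true, null,
            ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));
        amClient.addContainerRequest(ask);

        // Match an allocated OPPORTUNISTIC container back to the outstanding
        // ask using the new ExecutionType-aware overload, then remove it.
        List<? extends Collection<ContainerRequest>> matches =
            amClient.getMatchingRequests(priority, ResourceRequest.ANY,
                ExecutionType.OPPORTUNISTIC, capability);
        if (!matches.isEmpty() && !matches.get(0).isEmpty()) {
          amClient.removeContainerRequest(matches.get(0).iterator().next());
        }
      }
    }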
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
new file mode 100644
index 0000000..853a512
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceRequestInfo;
+import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceReverseMemoryThenCpuComparator;
+
+class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
+
+  private static final Log LOG = LogFactory.getLog(RemoteRequestsTable.class);
+
+  static ResourceReverseMemoryThenCpuComparator resourceComparator =
+      new ResourceReverseMemoryThenCpuComparator();
+
+  /**
+   * Nested Iterator that iterates over just the ResourceRequestInfo
+   * object.
+   */
+  class RequestInfoIterator implements Iterator<ResourceRequestInfo> {
+    private Iterator<Map<String, Map<ExecutionType, TreeMap<Resource,
+        ResourceRequestInfo>>>> iLocMap;
+    private Iterator<Map<ExecutionType, TreeMap<Resource,
+        ResourceRequestInfo>>> iExecTypeMap;
+    private Iterator<TreeMap<Resource, ResourceRequestInfo>> iCapMap;
+    private Iterator<ResourceRequestInfo> iResReqInfo;
+
+    public RequestInfoIterator(Iterator<Map<String,
+        Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>>>
+        iLocationMap) {
+      this.iLocMap = iLocationMap;
+      if (iLocMap.hasNext()) {
+        iExecTypeMap = iLocMap.next().values().iterator();
+      } else {
+        iExecTypeMap =
+            new LinkedList<Map<ExecutionType, TreeMap<Resource,
+                ResourceRequestInfo>>>().iterator();
+      }
+      if (iExecTypeMap.hasNext()) {
+        iCapMap = iExecTypeMap.next().values().iterator();
+      } else {
+        iCapMap =
+            new LinkedList<TreeMap<Resource, ResourceRequestInfo>>()
+                .iterator();
+      }
+      if (iCapMap.hasNext()) {
+        iResReqInfo = iCapMap.next().values().iterator();
+      } else {
+        iResReqInfo = new LinkedList<ResourceRequestInfo>().iterator();
+      }
+    }
+
+    @Override
+    public boolean hasNext() {
+      return iLocMap.hasNext()
+          || iExecTypeMap.hasNext()
+          || iCapMap.hasNext()
+          || iResReqInfo.hasNext();
+    }
+
+    @Override
+    public ResourceRequestInfo next() {
+      if (!iResReqInfo.hasNext()) {
+        if (!iCapMap.hasNext()) {
+          if (!iExecTypeMap.hasNext()) {
+            iExecTypeMap = iLocMap.next().values().iterator();
+          }
+          iCapMap = iExecTypeMap.next().values().iterator();
+        }
+        iResReqInfo = iCapMap.next().values().iterator();
+      }
+      return iResReqInfo.next();
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException("Remove is not supported " +
+          "for this iterator !!");
+    }
+  }
+
+  // Nested map with primary key:
+  // Priority -> ResourceName(String) -> ExecutionType -> Capability(Resource)
+  // and value: ResourceRequestInfo
+  private Map<Priority, Map<String, Map<ExecutionType, TreeMap<Resource,
+      ResourceRequestInfo>>>> remoteRequestsTable = new HashMap<>();
+
+  @Override
+  public Iterator<ResourceRequestInfo> iterator() {
+    return new RequestInfoIterator(remoteRequestsTable.values().iterator());
+  }
+
+  ResourceRequestInfo get(Priority priority, String location,
+      ExecutionType execType, Resource capability) {
+    TreeMap<Resource, ResourceRequestInfo> capabilityMap =
+        getCapabilityMap(priority, location, execType);
+    if (capabilityMap == null) {
+      return null;
+    }
+    return capabilityMap.get(capability);
+  }
+
+  void put(Priority priority, String resourceName, ExecutionType execType,
+      Resource capability, ResourceRequestInfo resReqInfo) {
+    Map<String, Map<ExecutionType, TreeMap<Resource,
+        ResourceRequestInfo>>> locationMap =
+        remoteRequestsTable.get(priority);
+    if (locationMap == null) {
+      locationMap = new HashMap<>();
+      this.remoteRequestsTable.put(priority, locationMap);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added priority=" + priority);
+      }
+    }
+    Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>> execTypeMap =
+        locationMap.get(resourceName);
+    if (execTypeMap == null) {
+      execTypeMap = new HashMap<>();
+      locationMap.put(resourceName, execTypeMap);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added resourceName=" + resourceName);
+      }
+    }
+    TreeMap<Resource, ResourceRequestInfo> capabilityMap =
+        execTypeMap.get(execType);
+    if (capabilityMap == null) {
+      capabilityMap = new TreeMap<>(resourceComparator);
+      execTypeMap.put(execType, capabilityMap);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added Execution Type=" + execType);
+      }
+    }
+    capabilityMap.put(capability, resReqInfo);
+  }
+
+  ResourceRequestInfo remove(Priority priority, String resourceName,
+      ExecutionType execType, Resource capability) {
+    ResourceRequestInfo retVal = null;
+    Map<String, Map<ExecutionType, TreeMap<Resource,
+        ResourceRequestInfo>>> locationMap = remoteRequestsTable.get(priority);
+    if (locationMap == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No such priority=" + priority);
+      }
+      return null;
+    }
+    Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
+        execTypeMap = locationMap.get(resourceName);
+    if (execTypeMap == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No such resourceName=" + resourceName);
+      }
+      return null;
+    }
+    TreeMap<Resource, ResourceRequestInfo> capabilityMap =
+        execTypeMap.get(execType);
+    if (capabilityMap == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No such Execution Type=" + execType);
+      }
+      return null;
+    }
+    retVal = capabilityMap.remove(capability);
+    if (capabilityMap.size() == 0) {
+      execTypeMap.remove(execType);
+      if (execTypeMap.size() == 0) {
+        locationMap.remove(resourceName);
+        if (locationMap.size() == 0) {
+          this.remoteRequestsTable.remove(priority);
+        }
+      }
+    }
+    return retVal;
+  }
+
+  Map<String, Map<ExecutionType, TreeMap<Resource,
+      ResourceRequestInfo>>> getLocationMap(Priority priority) {
+    return remoteRequestsTable.get(priority);
+  }
+
+  Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
+      getExecutionTypeMap(Priority priority, String location) {
+    Map<String, Map<ExecutionType, TreeMap<Resource,
+        ResourceRequestInfo>>> locationMap = getLocationMap(priority);
+    if (locationMap == null) {
+      return null;
+    }
+    return locationMap.get(location);
+  }
+
+  TreeMap<Resource, ResourceRequestInfo> getCapabilityMap(Priority
+      priority, String location,
+      ExecutionType execType) {
+    Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
+        executionTypeMap = getExecutionTypeMap(priority, location);
+    if (executionTypeMap == null) {
+      return null;
+    }
+    return executionTypeMap.get(execType);
+  }
+
+  @SuppressWarnings("unchecked")
+  List<ResourceRequestInfo> getAllResourceRequestInfos(Priority priority,
+      Collection<String> locations) {
+    List retList = new LinkedList<>();
+    for (String location : locations) {
+      for (ExecutionType eType : ExecutionType.values()) {
+        TreeMap<Resource, ResourceRequestInfo> capabilityMap =
+            getCapabilityMap(priority, location, eType);
+        if (capabilityMap != null) {
+          retList.addAll(capabilityMap.values());
+        }
+      }
+    }
+    return retList;
+  }
+
+  List<ResourceRequestInfo> getMatchingRequests(
+      Priority priority, String resourceName, ExecutionType executionType,
+      Resource capability) {
+    List<ResourceRequestInfo> list = new LinkedList<>();
+    TreeMap<Resource, ResourceRequestInfo> capabilityMap =
+        getCapabilityMap(priority, resourceName, executionType);
+    if (capabilityMap != null) {
+      ResourceRequestInfo resourceRequestInfo = capabilityMap.get(capability);
+      if (resourceRequestInfo != null) {
+        list.add(resourceRequestInfo);
+      } else {
+        list.addAll(capabilityMap.tailMap(capability).values());
+      }
+    }
+    return list;
+  }
+
+  @SuppressWarnings("unchecked")
+  ResourceRequestInfo addResourceRequest(Priority priority, String resourceName,
+      ExecutionTypeRequest execTypeReq, Resource capability, T req,
+      boolean relaxLocality, String labelExpression) {
+    ResourceRequestInfo resourceRequestInfo = get(priority, resourceName,
+        execTypeReq.getExecutionType(), capability);
+    if (resourceRequestInfo == null) {
+      resourceRequestInfo =
+          new ResourceRequestInfo(priority, resourceName, capability,
+              relaxLocality);
+      put(priority, resourceName, execTypeReq.getExecutionType(), capability,
+          resourceRequestInfo);
+    }
+    resourceRequestInfo.remoteRequest.setExecutionTypeRequest(execTypeReq);
+    resourceRequestInfo.remoteRequest.setNumContainers(
+        resourceRequestInfo.remoteRequest.getNumContainers() + 1);
+
+    if (relaxLocality) {
+      resourceRequestInfo.containerRequests.add(req);
+    }
+
+    if (ResourceRequest.ANY.equals(resourceName)) {
+      resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression);
+    }
+    return resourceRequestInfo;
+  }
+
+  ResourceRequestInfo decResourceRequest(Priority priority, String resourceName,
+      ExecutionTypeRequest execTypeReq, Resource capability, T req) {
+    ResourceRequestInfo resourceRequestInfo = get(priority, resourceName,
+        execTypeReq.getExecutionType(), capability);
+
+    if (resourceRequestInfo == null) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Not decrementing resource as ResourceRequestInfo with " +
+            "priority=" + priority + ", " +
+            "resourceName=" + resourceName + ", " +
+            "executionType=" + execTypeReq + ", " +
+            "capability=" + capability + " is not present in request table");
+      }
+      return null;
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
+          + " priority=" + priority.getPriority()
+          + " resourceName=" + resourceName + " numContainers="
+          + resourceRequestInfo.remoteRequest.getNumContainers());
+    }
+
+    resourceRequestInfo.remoteRequest.setNumContainers(
+        resourceRequestInfo.remoteRequest.getNumContainers() - 1);
+
+    resourceRequestInfo.containerRequests.remove(req);
+
+    if (resourceRequestInfo.remoteRequest.getNumContainers() < 0) {
+      // guard against spurious removals
+      resourceRequestInfo.remoteRequest.setNumContainers(0);
+    }
+    return resourceRequestInfo;
+  }
+
+  boolean isEmpty() {
+    return remoteRequestsTable.isEmpty();
+  }
+
+}

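The heart of the new RemoteRequestsTable is the four-level nested map documented above (Priority -> resource name -> ExecutionType -> Capability -> ResourceRequestInfo): get() walks down one level at a time, put() creates missing inner maps on the way down, and remove() prunes empty inner maps on the way back up so that isEmpty() stays meaningful. The capability level is reverse sorted via ResourceReverseMemoryThenCpuComparator, which is why tailMap(capability) in getMatchingRequests returns the outstanding asks no larger than the container being matched. As a rough mental model only, the fragment below mimics that shape with String keys standing in for the YARN record types; the class and its names are invented for illustration and are not part of the patch.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    // Simplified stand-in for the nested request table; String keys replace
    // Priority/location/ExecutionType/Resource purely to keep the sketch short.
    class NestedTableSketch<V> {
      private final Map<String, Map<String, Map<String, TreeMap<String, V>>>>
          table = new HashMap<>();

      V get(String priority, String location, String execType, String capability) {
        Map<String, Map<String, TreeMap<String, V>>> locMap = table.get(priority);
        if (locMap == null) {
          return null;
        }
        Map<String, TreeMap<String, V>> execMap = locMap.get(location);
        if (execMap == null) {
          return null;
        }
        TreeMap<String, V> capMap = execMap.get(execType);
        return capMap == null ? null : capMap.get(capability);
      }

      void put(String priority, String location, String execType,
          String capability, V value) {
        table.computeIfAbsent(priority, k -> new HashMap<>())
            .computeIfAbsent(location, k -> new HashMap<>())
            .computeIfAbsent(execType, k -> new TreeMap<>())
            .put(capability, value);
      }
    }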
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
new file mode 100644
index 0000000..0b62054
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/BaseAMRMProxyE2ETest.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyTokenSecretManager;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Base test case to be used for testing frameworks that use the AMRMProxy.
+ */
+public abstract class BaseAMRMProxyE2ETest {
+
+  protected ApplicationMasterProtocol createAMRMProtocol(YarnClient rmClient,
+      ApplicationId appId, MiniYARNCluster cluster,
+      final Configuration yarnConf)
+      throws IOException, InterruptedException, YarnException {
+
+    UserGroupInformation user = null;
+
+    // Get the AMRMToken from AMRMProxy
+
+    ApplicationReport report = rmClient.getApplicationReport(appId);
+
+    user = UserGroupInformation.createProxyUser(
+        report.getCurrentApplicationAttemptId().toString(),
+        UserGroupInformation.getCurrentUser());
+
+    ContainerManagerImpl containerManager = (ContainerManagerImpl) cluster
+        .getNodeManager(0).getNMContext().getContainerManager();
+
+    AMRMProxyTokenSecretManager amrmTokenSecretManager =
+        containerManager.getAMRMProxyService().getSecretManager();
+    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
+        amrmTokenSecretManager
+            .createAndGetAMRMToken(report.getCurrentApplicationAttemptId());
+
+    SecurityUtil.setTokenService(token,
+        containerManager.getAMRMProxyService().getBindAddress());
+    user.addToken(token);
+
+    // Start Application Master
+
+    return user
+        .doAs(new PrivilegedExceptionAction<ApplicationMasterProtocol>() {
+          @Override
+          public ApplicationMasterProtocol run() throws Exception {
+            return ClientRMProxy.createRMProxy(yarnConf,
+                ApplicationMasterProtocol.class);
+          }
+        });
+  }
+
+  protected AllocateRequest createAllocateRequest(List<NodeReport> listNode) {
+    // The test needs AMRMClient to create a real allocate request
+    AMRMClientImpl<AMRMClient.ContainerRequest> amClient =
+        new AMRMClientImpl<>();
+
+    Resource capability = Resource.newInstance(1024, 2);
+    Priority priority = Priority.newInstance(1);
+    List<NodeReport> nodeReports = listNode;
+    String node = nodeReports.get(0).getNodeId().getHost();
+    String[] nodes = new String[] {node};
+
+    AMRMClient.ContainerRequest storedContainer1 =
+        new AMRMClient.ContainerRequest(capability, nodes, null, priority);
+    amClient.addContainerRequest(storedContainer1);
+    amClient.addContainerRequest(storedContainer1);
+
+    List<ResourceRequest> resourceAsk = new ArrayList<>();
+    for (ResourceRequest rr : amClient.ask) {
+      resourceAsk.add(rr);
+    }
+
+    ResourceBlacklistRequest resourceBlacklistRequest = ResourceBlacklistRequest
+        .newInstance(new ArrayList<>(), new ArrayList<>());
+
+    int responseId = 1;
+
+    return AllocateRequest.newInstance(responseId, 0, resourceAsk,
+        new ArrayList<>(), resourceBlacklistRequest);
+  }
+
+  protected ApplicationAttemptId createApp(YarnClient yarnClient,
+      MiniYARNCluster yarnCluster, Configuration conf) throws Exception {
+
+    ApplicationSubmissionContext appContext =
+        yarnClient.createApplication().getApplicationSubmissionContext();
+    ApplicationId appId = appContext.getApplicationId();
+
+    appContext.setApplicationName("Test");
+
+    Priority pri = Records.newRecord(Priority.class);
+    pri.setPriority(0);
+    appContext.setPriority(pri);
+
+    appContext.setQueue("default");
+
+    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(
+        Collections.<String, LocalResource> emptyMap(),
+        new HashMap<String, String>(), Arrays.asList("sleep", "10000"),
+        new HashMap<String, ByteBuffer>(), null,
+        new HashMap<ApplicationAccessType, String>());
+    appContext.setAMContainerSpec(amContainer);
+    appContext.setResource(Resource.newInstance(1024, 1));
+
+    SubmitApplicationRequest appRequest =
+        Records.newRecord(SubmitApplicationRequest.class);
+    appRequest.setApplicationSubmissionContext(appContext);
+
+    yarnClient.submitApplication(appContext);
+
+    RMAppAttempt appAttempt = null;
+    ApplicationAttemptId attemptId = null;
+    while (true) {
+      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+      if (appReport
+          .getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
+        attemptId =
+            appReport.getCurrentApplicationAttemptId();
+        appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
+            .get(attemptId.getApplicationId()).getCurrentAppAttempt();
+        while (true) {
+          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
+            break;
+          }
+        }
+        break;
+      }
+    }
+    Thread.sleep(1000);
+    // Just dig into the ResourceManager and get the AMRMToken just for the sake
+    // of testing.
+    UserGroupInformation.setLoginUser(UserGroupInformation
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
+
+    // emulate RM setup of AMRM token in credentials by adding the token
+    // *before* setting the token service
+    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
+    appAttempt.getAMRMToken().setService(
+        ClientRMProxy.getAMRMTokenService(conf));
+    return attemptId;
+  }
+}

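One detail of the createApp() helper above worth calling out: the inner while (true) loop polls appAttempt.getAppAttemptState() for RMAppAttemptState.LAUNCHED without sleeping, so the test thread busy-waits until the attempt launches. A bounded poll along the following lines would do the same job more gently; this is only a sketch of an alternative (a hypothetical helper using the same imports as the class above), not something the patch adds.

    // Hypothetical helper: poll until the attempt reaches LAUNCHED, or fail
    // after timeoutMs milliseconds instead of spinning forever.
    private static void waitForLaunched(RMAppAttempt appAttempt, long timeoutMs)
        throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (appAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException(
              "Attempt not LAUNCHED within " + timeoutMs + " ms");
        }
        Thread.sleep(100);
      }
    }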
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 75b49d0..99bfca5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.NMToken;
@@ -413,11 +414,13 @@ public class TestAMRMClient {
       amClient.addContainerRequest(storedContainer3);
       
       // test addition and storage
-      int containersRequestedAny = amClient.remoteRequestsTable.get(priority)
-       .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
+      int containersRequestedAny = amClient.remoteRequestsTable.get(priority,
+          ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+          .remoteRequest.getNumContainers();
       assertEquals(2, containersRequestedAny);
-      containersRequestedAny = amClient.remoteRequestsTable.get(priority1)
-          .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
+      containersRequestedAny = amClient.remoteRequestsTable.get(priority1,
+          ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+          .remoteRequest.getNumContainers();
          assertEquals(1, containersRequestedAny);
       List<? extends Collection<ContainerRequest>> matches = 
           amClient.getMatchingRequests(priority, node, capability);
@@ -919,12 +922,15 @@ public class TestAMRMClient {
     amClient.removeContainerRequest(
         new ContainerRequest(capability, nodes, racks, priority));
     
-    int containersRequestedNode = amClient.remoteRequestsTable.get(priority)
-        .get(node).get(capability).remoteRequest.getNumContainers();
-    int containersRequestedRack = amClient.remoteRequestsTable.get(priority)
-        .get(rack).get(capability).remoteRequest.getNumContainers();
-    int containersRequestedAny = amClient.remoteRequestsTable.get(priority)
-    .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
+    int containersRequestedNode = amClient.remoteRequestsTable.get(priority,
+        node, ExecutionType.GUARANTEED, capability).remoteRequest
+        .getNumContainers();
+    int containersRequestedRack = amClient.remoteRequestsTable.get(priority,
+        rack, ExecutionType.GUARANTEED, capability).remoteRequest
+        .getNumContainers();
+    int containersRequestedAny = amClient.remoteRequestsTable.get(priority,
+        ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+        .remoteRequest.getNumContainers();
 
     assertEquals(2, containersRequestedNode);
     assertEquals(2, containersRequestedRack);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
index cb8c86a..2db33c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
@@ -26,6 +26,8 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -35,6 +37,46 @@ import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException;
 import org.junit.Test;
 
 public class TestAMRMClientContainerRequest {
+
+  @Test
+  public void testOpportunisticAndGuaranteedRequests() {
+    AMRMClientImpl<ContainerRequest> client =
+        new AMRMClientImpl<ContainerRequest>();
+
+    Configuration conf = new Configuration();
+    conf.setClass(
+        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        MyResolver.class, DNSToSwitchMapping.class);
+    client.init(conf);
+
+    Resource capability = Resource.newInstance(1024, 1);
+    ContainerRequest request =
+        new ContainerRequest(capability, new String[] {"host1", "host2"},
+            new String[] {"/rack2"}, Priority.newInstance(1));
+    client.addContainerRequest(request);
+    verifyResourceRequest(client, request, "host1", true);
+    verifyResourceRequest(client, request, "host2", true);
+    verifyResourceRequest(client, request, "/rack1", true);
+    verifyResourceRequest(client, request, "/rack2", true);
+    verifyResourceRequest(client, request, ResourceRequest.ANY, true);
+    ContainerRequest request2 =
+        new ContainerRequest(capability, new String[] {"host1", "host2"},
+            new String[] {"/rack2"}, Priority.newInstance(1), true, null,
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true));
+    client.addContainerRequest(request2);
+    verifyResourceRequest(client, request, "host1", true,
+        ExecutionType.OPPORTUNISTIC);
+    verifyResourceRequest(client, request, "host2", true,
+        ExecutionType.OPPORTUNISTIC);
+    verifyResourceRequest(client, request, "/rack1", true,
+        ExecutionType.OPPORTUNISTIC);
+    verifyResourceRequest(client, request, "/rack2", true,
+        ExecutionType.OPPORTUNISTIC);
+    verifyResourceRequest(client, request, ResourceRequest.ANY, true,
+        ExecutionType.OPPORTUNISTIC);
+  }
+
   @Test
   public void testFillInRacks() {
     AMRMClientImpl<ContainerRequest> client =
@@ -224,8 +266,16 @@ public class TestAMRMClientContainerRequest {
   private void verifyResourceRequest(
       AMRMClientImpl<ContainerRequest> client, ContainerRequest request,
       String location, boolean expectedRelaxLocality) {
-    ResourceRequest ask =  client.remoteRequestsTable.get(request.getPriority())
-        .get(location).get(request.getCapability()).remoteRequest;
+    verifyResourceRequest(client, request, location, expectedRelaxLocality,
+        ExecutionType.GUARANTEED);
+  }
+
+  private void verifyResourceRequest(
+      AMRMClientImpl<ContainerRequest> client, ContainerRequest request,
+      String location, boolean expectedRelaxLocality,
+      ExecutionType executionType) {
+    ResourceRequest ask = client.remoteRequestsTable.get(request.getPriority(),
+        location, executionType, request.getCapability()).remoteRequest;
     assertEquals(location, ask.getResourceName());
     assertEquals(1, ask.getNumContainers());
     assertEquals(expectedRelaxLocality, ask.getRelaxLocality());

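The new testOpportunisticAndGuaranteedRequests case above hinges on GUARANTEED and OPPORTUNISTIC asks with identical priority and capability being tracked in separate branches of the request table. Below is a minimal fragment showing that independence through the public client API, assumed to run inside a test set up like the ones above (same MyResolver configuration, client already initialized); the variable names and the assert are illustrative, not part of the patch.

    Resource cap = Resource.newInstance(1024, 1);
    Priority pri = Priority.newInstance(1);
    ContainerRequest guaranteed = new ContainerRequest(cap, null, null, pri);
    ContainerRequest opportunistic = new ContainerRequest(cap, null, null, pri,
        true, null,
        ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));
    client.addContainerRequest(guaranteed);
    client.addContainerRequest(opportunistic);

    // Removing the OPPORTUNISTIC ask only touches the OPPORTUNISTIC branch of
    // the table; the GUARANTEED ask stays outstanding and still matches.
    client.removeContainerRequest(opportunistic);
    assert !client.getMatchingRequests(pri, ResourceRequest.ANY, cap).isEmpty();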
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51432779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index f1e3f03..33f7527 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -19,20 +19,12 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -40,43 +32,25 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
-import org.apache.hadoop.yarn.api.records.Priority;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.client.ClientRMProxy;
-import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
-import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyTokenSecretManager;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
-import org.apache.hadoop.yarn.server.utils.BuilderUtils;
-import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
 
-public class TestAMRMProxy {
+/**
+ * End-to-End test cases for the AMRMProxy Service.
+ */
+public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 
   private static final Log LOG = LogFactory.getLog(TestAMRMProxy.class);
 
@@ -84,7 +58,7 @@ public class TestAMRMProxy {
    * This test validates register, allocate and finish of an application through
    * the AMRMPRoxy.
    */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testAMRMProxyE2E() throws Exception {
     MiniYARNCluster cluster = new MiniYARNCluster("testAMRMProxyE2E", 1, 1, 1);
     YarnClient rmClient = null;
@@ -107,7 +81,8 @@ public class TestAMRMProxy {
 
       // Submit application
 
-      ApplicationId appId = createApp(rmClient, cluster);
+      ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
+      ApplicationId appId = appAttmptId.getApplicationId();
 
       client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
 
@@ -173,7 +148,7 @@ public class TestAMRMProxy {
    * that the received token it is different from the previous one within 5
    * requests.
    */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testE2ETokenRenewal() throws Exception {
     MiniYARNCluster cluster =
         new MiniYARNCluster("testE2ETokenRenewal", 1, 1, 1);
@@ -201,7 +176,8 @@ public class TestAMRMProxy {
 
       // Submit
 
-      ApplicationId appId = createApp(rmClient, cluster);
+      ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
+      ApplicationId appId = appAttmptId.getApplicationId();
 
       client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
 
@@ -252,7 +228,7 @@ public class TestAMRMProxy {
    * This test validates that an AM cannot register directly to the RM, with the
    * token provided by the AMRMProxy.
    */
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testE2ETokenSwap() throws Exception {
     MiniYARNCluster cluster = new MiniYARNCluster("testE2ETokenSwap", 1, 1, 1);
     YarnClient rmClient = null;
@@ -270,7 +246,8 @@ public class TestAMRMProxy {
       rmClient.init(yarnConf);
       rmClient.start();
 
-      ApplicationId appId = createApp(rmClient, cluster);
+      ApplicationAttemptId appAttmptId = createApp(rmClient, cluster, conf);
+      ApplicationId appId = appAttmptId.getApplicationId();
 
       client = createAMRMProtocol(rmClient, appId, cluster, yarnConf);
 
@@ -290,124 +267,4 @@ public class TestAMRMProxy {
       cluster.stop();
     }
   }
-
-  protected ApplicationMasterProtocol createAMRMProtocol(YarnClient rmClient,
-      ApplicationId appId, MiniYARNCluster cluster,
-      final Configuration yarnConf)
-          throws IOException, InterruptedException, YarnException {
-
-    UserGroupInformation user = null;
-
-    // Get the AMRMToken from AMRMProxy
-
-    ApplicationReport report = rmClient.getApplicationReport(appId);
-
-    user = UserGroupInformation.createProxyUser(
-        report.getCurrentApplicationAttemptId().toString(),
-        UserGroupInformation.getCurrentUser());
-
-    ContainerManagerImpl containerManager = (ContainerManagerImpl) cluster
-        .getNodeManager(0).getNMContext().getContainerManager();
-
-    AMRMProxyTokenSecretManager amrmTokenSecretManager =
-        containerManager.getAMRMProxyService().getSecretManager();
-    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
-        amrmTokenSecretManager
-            .createAndGetAMRMToken(report.getCurrentApplicationAttemptId());
-
-    SecurityUtil.setTokenService(token,
-        containerManager.getAMRMProxyService().getBindAddress());
-    user.addToken(token);
-
-    // Start Application Master
-
-    return user
-        .doAs(new PrivilegedExceptionAction<ApplicationMasterProtocol>() {
-          @Override
-          public ApplicationMasterProtocol run() throws Exception {
-            return ClientRMProxy.createRMProxy(yarnConf,
-                ApplicationMasterProtocol.class);
-          }
-        });
-  }
-
-  protected AllocateRequest createAllocateRequest(List<NodeReport> listNode) {
-    // The test needs AMRMClient to create a real allocate request
-    AMRMClientImpl<ContainerRequest> amClient =
-        new AMRMClientImpl<ContainerRequest>();
-
-    Resource capability = Resource.newInstance(1024, 2);
-    Priority priority = Priority.newInstance(1);
-    List<NodeReport> nodeReports = listNode;
-    String node = nodeReports.get(0).getNodeId().getHost();
-    String[] nodes = new String[] { node };
-
-    ContainerRequest storedContainer1 =
-        new ContainerRequest(capability, nodes, null, priority);
-    amClient.addContainerRequest(storedContainer1);
-    amClient.addContainerRequest(storedContainer1);
-
-    List<ResourceRequest> resourceAsk = new ArrayList<ResourceRequest>();
-    for (ResourceRequest rr : amClient.ask) {
-      resourceAsk.add(rr);
-    }
-
-    ResourceBlacklistRequest resourceBlacklistRequest = ResourceBlacklistRequest
-        .newInstance(new ArrayList<String>(), new ArrayList<String>());
-
-    int responseId = 1;
-
-    return AllocateRequest.newInstance(responseId, 0, resourceAsk,
-        new ArrayList<ContainerId>(), resourceBlacklistRequest);
-  }
-
-  protected ApplicationId createApp(YarnClient yarnClient,
-      MiniYARNCluster yarnCluster) throws Exception {
-
-    ApplicationSubmissionContext appContext =
-        yarnClient.createApplication().getApplicationSubmissionContext();
-    ApplicationId appId = appContext.getApplicationId();
-
-    appContext.setApplicationName("Test");
-
-    Priority pri = Records.newRecord(Priority.class);
-    pri.setPriority(0);
-    appContext.setPriority(pri);
-
-    appContext.setQueue("default");
-
-    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(
-        Collections.<String, LocalResource> emptyMap(),
-        new HashMap<String, String>(), Arrays.asList("sleep", "10000"),
-        new HashMap<String, ByteBuffer>(), null,
-        new HashMap<ApplicationAccessType, String>());
-    appContext.setAMContainerSpec(amContainer);
-    appContext.setResource(Resource.newInstance(1024, 1));
-
-    SubmitApplicationRequest appRequest =
-        Records.newRecord(SubmitApplicationRequest.class);
-    appRequest.setApplicationSubmissionContext(appContext);
-
-    yarnClient.submitApplication(appContext);
-
-    RMAppAttempt appAttempt = null;
-    while (true) {
-      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
-      if (appReport
-          .getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
-        ApplicationAttemptId attemptId =
-            appReport.getCurrentApplicationAttemptId();
-        appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
-            .get(attemptId.getApplicationId()).getCurrentAppAttempt();
-        while (true) {
-          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
-            break;
-          }
-        }
-        break;
-      }
-    }
-    Thread.sleep(1000);
-    return appId;
-  }
 }




[09/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
new file mode 100644
index 0000000..158528d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
@@ -0,0 +1,2581 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu May 12 17:48:36 PDT 2016 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-client 2.7.2"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/comm
 ons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vin
 odkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.1
 3/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.
 0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guav
 a-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/
 Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-g
 it/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname hadoop-yarn-client 2.7.2 -->
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.SCMAdmin -->
+  <class name="SCMAdmin" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="SCMAdmin"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="SCMAdmin" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createSCMAdminProtocol" return="org.apache.hadoop.yarn.server.api.SCMAdminProtocol"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.SCMAdmin -->
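For context, SCMAdmin implements the standard Hadoop Tool interface, so it can be driven through ToolRunner as well as from the command line. A minimal sketch under that assumption (the wrapper class name is made up, and the sub-commands accepted by SCMAdmin#run are not listed in this file):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.ToolRunner;
  import org.apache.hadoop.yarn.client.SCMAdmin;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class SCMAdminSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new YarnConfiguration();
      // ToolRunner parses the generic Hadoop options and delegates the rest to SCMAdmin#run.
      int exitCode = ToolRunner.run(conf, new SCMAdmin(conf), args);
      System.exit(exitCode);
    }
  }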
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <class name="AHSClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSClient" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AHSClient.]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a report of the given Application.
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+   <li>host - set to "N/A"</li>
+   <li>RPC port - set to -1</li>
+   <li>client token - set to "N/A"</li>
+   <li>diagnostics - set to "N/A"</li>
+   <li>tracking URL - set to "N/A"</li>
+   <li>original tracking URL - set to "N/A"</li>
+   <li>resource usage report - all values are -1</li>
+ </ul>
+
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get reports of all ApplicationAttempts of an Application in the cluster.
+ </p>
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get reports of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AHSClient -->
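To make the AHSClient API above concrete, here is a minimal usage sketch. It assumes an ApplicationId obtained elsewhere, uses a made-up class name, and omits error handling; it is an illustration, not code from this commit:

  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.api.records.ApplicationReport;
  import org.apache.hadoop.yarn.api.records.ContainerReport;
  import org.apache.hadoop.yarn.client.api.AHSClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class AhsClientSketch {
    public static void dumpHistory(ApplicationId appId) throws Exception {
      Configuration conf = new YarnConfiguration();
      AHSClient ahsClient = AHSClient.createAHSClient();
      ahsClient.init(conf);
      ahsClient.start();
      try {
        // Application-level report from the history service.
        ApplicationReport report = ahsClient.getApplicationReport(appId);
        System.out.println(report.getName() + " finished as " + report.getFinalApplicationStatus());

        // Drill down: attempts of the application, then containers of the first attempt.
        List<ApplicationAttemptReport> attempts = ahsClient.getApplicationAttempts(appId);
        List<ContainerReport> containers =
            ahsClient.getContainers(attempts.get(0).getApplicationAttemptId());
        System.out.println(attempts.size() + " attempts, " + containers.size() + " containers");
      } finally {
        ahsClient.stop();
      }
    }
  }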
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <class name="AMRMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClient" return="org.apache.hadoop.yarn.client.api.AMRMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AMRMClient.
+ For usage:
+ <pre>
+ {@code
+ AMRMClient.<T>createAMRMClient()
+ }</pre>
+ @return the newly created AMRMClient instance.]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progressIndicator" type="float"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request additional containers and receive new container allocations.
+ Requests made via <code>addContainerRequest</code> are sent to the
+ <code>ResourceManager</code>. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. The app should not make concurrent allocate
+ requests; doing so may cause request loss.
+
+ <p>
+ Note: if the user has not removed container requests that have already
+ been satisfied, then a re-register may end up sending the entire set of
+ container requests to the RM (including already matched requests), which
+ could mean the RM ends up allocating a large number of new containers.
+ </p>
+
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called in the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request may have
+ already been sent to the ResourceManager, so even after the remove request
+ the app must be prepared to receive an allocation for that request.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the ResourceManager. If the app cannot use
+ a container, or wants to give the container up, it can release it.
+ The app needs to make new requests for the released resource capability if
+ it still needs it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made.
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made.
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical
+ <code>Resource</code> size that fit in the given capability. In a
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of requests matching the parameters]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+
+ @param blacklistAdditions list of resources which should be added to the
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM token cache for the <code>AMRMClient</code>. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>AMRMClient</code>. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>.
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms. In the main loop, this method logs the
+ message "waiting in main loop" once every <code>logInterval</code>
+ iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval in milliseconds at which to call <code>check</code>
+ @param logInterval number of iterations between log messages]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
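The AMRMClient methods above fit together into the usual ApplicationMaster lifecycle: register, request, heartbeat via allocate, unregister. A minimal sketch of that flow follows; the host name, resource sizes and wrapper class are made up, error handling is omitted, and a real AM would hand the allocated containers to an NMClient:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
  import org.apache.hadoop.yarn.api.records.Container;
  import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
  import org.apache.hadoop.yarn.api.records.Priority;
  import org.apache.hadoop.yarn.api.records.Resource;
  import org.apache.hadoop.yarn.client.api.AMRMClient;
  import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class AmLifecycleSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new YarnConfiguration();
      AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
      rmClient.init(conf);
      rmClient.start();

      // Register before any other interaction with the ResourceManager.
      rmClient.registerApplicationMaster("am-host.example.com", 0, "");

      // Ask for one container of 1024 MB / 1 vcore anywhere in the cluster.
      ContainerRequest request = new ContainerRequest(
          Resource.newInstance(1024, 1), null, null, Priority.newInstance(0));
      rmClient.addContainerRequest(request);

      // allocate() doubles as the AM heartbeat and must be called periodically.
      int allocated = 0;
      while (allocated == 0) {
        AllocateResponse response = rmClient.allocate(0.1f);
        for (Container container : response.getAllocatedContainers()) {
          // In a real AM the container would be launched through an NMClient here.
          allocated++;
        }
        Thread.sleep(1000);
      }

      // Remove the satisfied request so a re-register does not resend it.
      rmClient.removeContainerRequest(request);

      rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
      rmClient.stop();
    }
  }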
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
+  <class name="AMRMClient.ContainerRequest" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints and
+ locality relaxation enabled.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.]]>
+      </doc>
+    </constructor>
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.
+ @param relaxLocality
+          If true, containers for this request may be assigned on hosts
+          and racks other than the ones explicitly requested.]]>
+      </doc>
+    </constructor>
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.
+ @param relaxLocality
+          If true, containers for this request may be assigned on hosts
+          and racks other than the ones explicitly requested.
+ @param nodeLabelsExpression
+          Set node labels to allocate resource, now we only support
+          asking for only a single node label]]>
+      </doc>
+    </constructor>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodes" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRacks" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRelaxLocality" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Object to represent a single container request for resources. Scheduler
+ documentation should be consulted for the specifics of how the parameters
+ are honored.
+
+ By default, YARN schedulers try to allocate containers at the requested
+ locations but they may relax the constraints in order to expedite meeting
+ allocation limits. They first relax the constraint to the same rack as the
+ requested node and then to anywhere in the cluster. The relaxLocality flag
+ may be used to disable locality relaxation and request containers at only
+ specific locations. The following conditions apply.
+ <ul>
+ <li>Within a priority, all container requests must have the same value for
+ locality relaxation. Either enabled or disabled.</li>
+ <li>If locality relaxation is disabled, then across requests, locations at
+ different network levels may not be specified. E.g. it is invalid to make a
+ request for a specific node and another request for a specific rack.</li>
+ <li>If locality relaxation is disabled, then only within the same request,
+ a node and its rack may be specified together. This allows for a specific
+ rack with a preference for a specific node within that rack.</li>
+ </ul>
+ To re-enable locality relaxation at a given priority, all pending requests
+ with locality relaxation disabled must be first removed. Then they can be
+ added back with locality relaxation enabled.
+
+ All getters return immutable values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
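A short sketch of how the ContainerRequest constructors above interact with locality relaxation. The host and rack names are placeholders; note that requests at the same priority must agree on the relaxLocality flag, so the two requests below use different priorities:

  import org.apache.hadoop.yarn.api.records.Priority;
  import org.apache.hadoop.yarn.api.records.Resource;
  import org.apache.hadoop.yarn.client.api.AMRMClient;
  import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

  public class ContainerRequestSketch {
    public static void addRequests(AMRMClient<ContainerRequest> rmClient) {
      Resource capability = Resource.newInstance(2048, 2);

      // Strict placement: locality relaxation disabled, so the container may only be
      // allocated on the named node. The node and its rack are given together in the
      // same request, as the class documentation above requires.
      ContainerRequest strict = new ContainerRequest(
          capability,
          new String[] {"node1.example.com"},   // hypothetical host
          new String[] {"/default-rack"},       // hypothetical rack
          Priority.newInstance(1),
          false);                               // relaxLocality

      // Relaxed placement at a different priority: may be allocated anywhere.
      ContainerRequest anywhere = new ContainerRequest(
          capability, null, null, Priority.newInstance(2));

      rmClient.addContainerRequest(strict);
      rmClient.addContainerRequest(anywhere);
    }
  }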
+  <!-- start class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
+  <class name="InvalidContainerRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidContainerRequestException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidContainerRequestException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidContainerRequestException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Thrown when arguments are combined to construct an
+ <code>AMRMClient.ContainerRequest</code> in an invalid way.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
+  <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
+  <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="startContainer" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Start an allocated container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.</p>
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+                               <code>NodeManager</code> to launch the
+                               container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Stop a started container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Query the status of a container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupRunningContainersOnStop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Set whether containers that were started by this client and are
+ still running should be stopped when the client stops. The feature is
+ enabled by default.</p> Note, however, that such containers are stopped
+ only when the service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM Token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMClient -->
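A minimal sketch of driving the NMClient API above with a Container previously obtained from AMRMClient#allocate. The launch command and wrapper class are placeholders and error handling is omitted:

  import java.util.Collections;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.api.records.Container;
  import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
  import org.apache.hadoop.yarn.api.records.ContainerStatus;
  import org.apache.hadoop.yarn.client.api.NMClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class NmClientSketch {
    public static void launchAndStop(Container allocated) throws Exception {
      Configuration conf = new YarnConfiguration();
      NMClient nmClient = NMClient.createNMClient();
      nmClient.init(conf);
      nmClient.start();
      try {
        // The Container comes from AMRMClient#allocate and carries the NodeId and token.
        ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
            null, null, Collections.singletonList("sleep 60"), null, null, null);
        nmClient.startContainer(allocated, ctx);

        ContainerStatus status =
            nmClient.getContainerStatus(allocated.getId(), allocated.getNodeId());
        System.out.println("container state: " + status.getState());

        nmClient.stopContainer(allocated.getId(), allocated.getNodeId());
      } finally {
        nmClient.stop();
      }
    }
  }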
+  <!-- start class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <class name="NMTokenCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a NM token cache instance.]]>
+      </doc>
+    </constructor>
+    <method name="getSingleton" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the singleton NM token cache.
+
+ @return the singleton NM token cache.]]>
+      </doc>
+    </method>
+    <method name="getNMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent. Only the singleton obtained from
+ {@link #getSingleton()} is looked at for the tokens. If you are using your
+ own NMTokenCache that is different from the singleton, use
+ {@link #getToken(String) }
+
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node manager]]>
+      </doc>
+    </method>
+    <method name="setNMToken"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address only in the singleton obtained from
+ {@link #getSingleton()}. If you are using your own NMTokenCache that is
+ different from the singleton, use {@link #setToken(String, Token) }
+
+ @param nodeAddr
+          node address (host:port)
+ @param token
+          NMToken]]>
+      </doc>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node
+         manager]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address
+ @param nodeAddr node address (host:port)
+ @param token NMToken]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NMTokenCache manages NMTokens required for an Application Master
+ communicating with individual NodeManagers.
+ <p>
+ By default the Yarn client libraries {@link AMRMClient} and {@link NMClient} use
+ the {@link #getSingleton()} instance of the cache.
+ <ul>
+   <li>
+     Using the singleton instance of the cache is appropriate when running a
+     single ApplicationMaster in the same JVM.
+   </li>
+   <li>
+     When using the singleton, users don't need to do anything special,
+     {@link AMRMClient} and {@link NMClient} are already set up to use the
+     default singleton {@link NMTokenCache}
+     </li>
+ </ul>
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+ <ul>
+   <li>
+     If using the {@link AMRMClient} and the {@link NMClient}, setting up
+     and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+     setting up and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using {@link ApplicationMasterProtocol} and
+     {@link ContainerManagementProtocol} directly, setting up and using an
+     instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   ...
+   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+   ...
+   AllocateRequest allocateRequest = ...
+   ...
+   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+   for (NMToken token : allocateResponse.getNMTokens()) {
+     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+   }
+   ...
+   ContainerManagementProtocolProxy nmPro = new ContainerManagementProtocolProxy(conf, nmTokenCache);
+   ...
+   nmPro.startContainer(container, containerContext);
+   ...
+ </pre>
+   </li>
+ </ul>
+ It is also possible to mix the usage of a client ({@code AMRMClient} or
+ {@code NMClient}, or the async versions of them) with a protocol proxy
+ ({@code ContainerManagementProtocolProxy} or
+ {@code ApplicationMasterProtocol}).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <!-- start class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
+  <class name="SharedCacheClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createSharedCacheClient" return="org.apache.hadoop.yarn.client.api.SharedCacheClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="use" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to claim a resource with the <code>SharedCacheManager.</code>
+ The client uses a checksum to identify the resource and an
+ {@link ApplicationId} to identify which application will be using the
+ resource.
+ </p>
+
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>Path</code>
+ to the resource in the shared cache is returned. If the resource does not
+ exist, null is returned instead.
+ </p>
+
+ @param applicationId ApplicationId of the application using the resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource
+ @return Path to the resource, or null if it does not exist]]>
+      </doc>
+    </method>
+    <method name="release"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to release a resource with the <code>SharedCacheManager.</code>
+ This method is called once an application is no longer using a claimed
+ resource in the shared cache. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application is
+ releasing the resource.
+ </p>
+
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+
+ @param applicationId ApplicationId of the application releasing the
+          resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sourceFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A convenience method to calculate the checksum of a specified file.
+
+ @param sourceFile A path to the input file
+ @return A hex string containing the checksum digest
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the client for YARN's shared cache.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
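A minimal sketch of the SharedCacheClient claim/release flow described above. The file path and class name are illustrative, and the release call is shown inline only for brevity; a real application would release the resource once it no longer needs it:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.client.api.SharedCacheClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class SharedCacheSketch {
    public static Path resolve(ApplicationId appId, Path localJar) throws Exception {
      Configuration conf = new YarnConfiguration();
      SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
      scClient.init(conf);
      scClient.start();
      try {
        // The checksum of the file is the resource key in the shared cache.
        String key = scClient.getFileChecksum(localJar);

        // Null means the resource is not cached; the app should upload it normally.
        Path cached = scClient.use(appId, key);

        // Optional optimization: tell the SharedCacheManager the app is done with it.
        if (cached != null) {
          scClient.release(appId, key);
        }
        return cached;
      } finally {
        scClient.stop();
      }
    }
  }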
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <class name="YarnClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of YarnClient.]]>
+      </doc>
+    </method>
+    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+ </p>
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Submit a new application to <code>YARN</code>. It is a blocking call: it
+ will not return the {@link ApplicationId} until the application has been
+ submitted successfully and accepted by the ResourceManager.
+ </p>
+
+ <p>
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+ </p>
+
+ <p>This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}</p>
+
+ @param appContext
+          {@link ApplicationSubmissionContext} containing all the details
+          needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+      </doc>
+    </method>
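A minimal sketch of the createApplication/submitApplication flow described above. The application name, resources and AM command are placeholders, and the AM container spec is deliberately trivial; the point is that the ApplicationId is taken from the submission context rather than invented by the caller:

  import java.util.Collections;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.api.records.ApplicationReport;
  import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
  import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
  import org.apache.hadoop.yarn.api.records.Resource;
  import org.apache.hadoop.yarn.client.api.YarnClient;
  import org.apache.hadoop.yarn.client.api.YarnClientApplication;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class SubmitSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new YarnConfiguration();
      YarnClient yarnClient = YarnClient.createYarnClient();
      yarnClient.init(conf);
      yarnClient.start();

      // createApplication() supplies the ApplicationId inside the submission context.
      YarnClientApplication app = yarnClient.createApplication();
      ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
      ApplicationId appId = appContext.getApplicationId();

      appContext.setApplicationName("submit-sketch");
      appContext.setResource(Resource.newInstance(1024, 1));
      appContext.setAMContainerSpec(ContainerLaunchContext.newInstance(
          null, null, Collections.singletonList("sleep 60"), null, null, null));

      // Blocks until the ResourceManager has accepted the application.
      yarnClient.submitApplication(appContext);

      ApplicationReport report = yarnClient.getApplicationReport(appId);
      System.out.println(appId + " is in state " + report.getYarnApplicationState());

      yarnClient.stop();
    }
  }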
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by the given ID.
+ </p>
+
+ @param applicationId
+          {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p>
+ The AMRM token is required for AM to RM scheduling operations. For
+ managed Application Masters Yarn takes care of injecting it. For unmanaged
+ Application Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Otherwise this method returns null.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+ </p>
+
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+ </p>
+
+ @param states The {@link NodeState}s to filter on. If no filter states are
+          given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
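
A similarly hedged sketch of getNodeReports, assuming an already initialized and started YarnClient (the helper class and method names are invented for illustration):

import java.util.List;

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;

public final class NodeReportPrinter {
  // Prints one line per RUNNING node; calling getNodeReports() with no
  // arguments would return nodes in all states instead.
  static void printRunningNodes(YarnClient client) throws Exception {
    List<NodeReport> nodes = client.getNodeReports(NodeState.RUNNING);
    for (NodeReport node : nodes) {
      System.out.println(node.getNodeId()
          + " containers=" + node.getNumContainers()
          + " capability=" + node.getCapability());
    }
  }
}
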
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to YARN using those tokens.
+
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+         talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about a given <em>queue</em>.
+ </p>
+
+ @param queueName
+          Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy
+ </p>
+
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about top level queues.
+ </p>
+
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all the immediate children queues
+ of the given queue
+ </p>
+
+ @param parent
+          Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+         of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information about <em>acls</em> for <em>current user</em> on all the
+ existing queues.
+ </p>
+
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+         <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+ </p>
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+ </p>
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempts
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Attempts to move the given application to the given queue.
+ </p>
+
+ @param appId
+    Application to move.
+ @param queue
+    Queue to place it into.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationRequest is satisfiable the {@code ResourceManager}
+ answers with a {@link ReservationSubmissionResponse} that includes a
+ {@link ReservationId}. Upon failure to find a valid allocation the response
+ is an exception with a message detailing the reason for the failure.
+ </p>
+
+ <p>
+ The semantics guarantee that the {@link ReservationId} returned
+ corresponds to a valid reservation existing in the time range requested by
+ the user. The amount of capacity dedicated to such a reservation can vary
+ over time, depending on the allocation that has been determined, but it is
+ guaranteed to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+ </p>
+
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+         submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation asks for its allocation to be revised.
+ </p>
+
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+ </p>
+
+ @param request to update an existing Reservation (the
+          {@link ReservationUpdateRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ </p>
+
+ @param request to remove an existing Reservation (the
+          {@link ReservationDeleteRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
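
As a rough illustration of the submit/delete flow described above, a sketch only: building the ReservationSubmissionRequest (the ReservationDefinition with the desired capacity, time window and gang constraints) is elided, and it assumes the 2.6-era behaviour where the ReservationId is returned on the response:

import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.client.api.YarnClient;

public final class ReservationFlow {
  // Submits a reservation, hands back its id, and later removes it; the
  // construction of `request` is left to the caller in this sketch.
  static ReservationId reserveAndRelease(YarnClient client,
      ReservationSubmissionRequest request) throws Exception {
    ReservationSubmissionResponse response = client.submitReservation(request);
    ReservationId reservationId = response.getReservationId();
    // ... submit applications against the reserved capacity using this id ...
    client.deleteReservation(
        ReservationDeleteRequest.newInstance(reservationId));
    return reservationId;
  }
}
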
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node to labels mappings in existing cluster
+ </p>
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get labels to nodes mapping
+ in existing cluster
+ </p>
+
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get labels to nodes mapping
+ for specified labels in existing cluster
+ </p>
+
+ @param labels labels for which labels to nodes mapping has to be retrieved
+ @return labels to nodes mappings for specific labels
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node labels in the cluster
+ </p>
+
+ @return cluster node labels collection
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+  <class name="YarnClientApplication" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClientApplication" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse, org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNewApplicationResponse" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async">
+  <!-- start class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <class name="AMRMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+    </method>
+    <method name="setHeartbeatInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interval" type="int"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this application master with the resource manager. On successful
+ registration, starts the heartbeating thread.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
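
A minimal sketch of the async client lifecycle implied by this method, assuming the caller supplies a CallbackHandler implementation; the heartbeat interval, host, port and tracking URL are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

public final class AsyncAmStartup {
  // Starts the async client with a 1s heartbeat and registers the AM.
  // The CallbackHandler (container allocation/completion callbacks) is
  // supplied by the caller and not shown here.
  static AMRMClientAsync<ContainerRequest> startAndRegister(
      Configuration conf, AMRMClientAsync.CallbackHandler handler,
      String host, int port, String trackingUrl) throws Exception {
    AMRMClientAsync<ContainerRequest> rmClient =
        AMRMClientAsync.createAMRMClientAsync(1000, handler);
    rmClient.init(conf);
    rmClient.start();
    RegisterApplicationMasterResponse response =
        rmClient.registerApplicationMaster(host, port, trackingUrl);
    System.out.println("Max capability: "
        + response.getMaximumResourceCapability());
    return rmClient;
  }
}
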
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called at the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request may have
+ already been sent to the ResourceManager, so even after the remove request
+ the app must be prepared to receive an allocation for the previous request.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ the container or wants to give up the container then it can release them.
+ The app needs to make new requests for the released resource capability if
+ it still needs it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      

<TRUNCATED>


[10/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
new file mode 100644
index 0000000..aa11aea
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
@@ -0,0 +1,2427 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Wed Apr 08 11:30:44 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-client 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-common/2.6.0/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m
 2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/h
 ttpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-log4j12/1.7.5/slf4j-log4j12-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/para
 namer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-auth/2.6.0/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/
 2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/org/htrace/htrace-core/3.0.4/htrace-core-3.0.4.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/llu/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-annotations/2.6.0/hadoop-annotations-2.6.0.jar:/Librar
 y/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.6.0.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.6.0.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/llu/.m2/repository/com/google/inject/guice/3.0/gu
 ice-3.0.jar:/Users/llu/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/llu/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/llu/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -apidir /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname hadoop-yarn-client 2.6.0 -->
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <class name="AHSClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSClient" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AHSClient.]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ </p>
+
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+ </p>
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+ </p>
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <class name="AMRMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClient" return="org.apache.hadoop.yarn.client.api.AMRMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AMRMClient.
+ For usage:
+ <pre>
+ {@code
+ AMRMClient.<T>createAMRMClientContainerRequest()
+ }</pre>
+ @return the newly created AMRMClient instance.]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progressIndicator" type="float"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request additional containers and receive new container allocations.
+ Requests made via <code>addContainerRequest</code> are sent to the
+ <code>ResourceManager</code>. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. The app should not make concurrent allocate
+ requests, as doing so may cause request loss.
+
+ <p>
+ Note : If the user has not removed container requests that have already
+ been satisfied, then the re-register may end up sending the entire
+ container requests to the RM (including matched requests), which would mean
+ the RM could end up giving it a lot of newly allocated containers.
+ </p>
+
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
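
A hedged sketch of the request-and-heartbeat cycle this method describes, with invented container sizes and counts; a real AM would also remove satisfied container requests, as the note above warns:

import java.util.List;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public final class SimpleAllocateLoop {
  // Asks for `count` 1 GB / 1 vcore containers anywhere in the cluster and
  // heartbeats until they have all been allocated.
  static void requestAndWait(AMRMClient<ContainerRequest> rmClient, int count)
      throws Exception {
    Priority priority = Priority.newInstance(0);
    Resource capability = Resource.newInstance(1024, 1);
    for (int i = 0; i < count; i++) {
      rmClient.addContainerRequest(
          new ContainerRequest(capability, null, null, priority));
    }
    int allocated = 0;
    while (allocated < count) {
      // allocate() doubles as the periodic heartbeat to the ResourceManager.
      AllocateResponse response = rmClient.allocate(0.1f);
      List<Container> containers = response.getAllocatedContainers();
      allocated += containers.size();
      // In production, removeContainerRequest() would be called here for
      // each request that has now been satisfied.
      Thread.sleep(1000);
    }
  }
}
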
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called at the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request may have
+ already been sent to the ResourceManager, so even after the remove request
+ the app must be prepared to receive an allocation for the previous request.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ the container or wants to give up the container then it can release them.
+ The app needs to make new requests for the released resource capability if
+ it still needs it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical
+ <code>Resource</code> size that fit in the given capability. In a
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of requests matching the parameters]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+
+ @param blacklistAdditions list of resources which should be added to the
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM token cache for the <code>AMRMClient</code>. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>
+ <p/>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>AMRMClient</code>. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>.
+ <p/>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms. In the main loop, this method will log
+ the message "waiting in main loop" every <code>logInterval</code>
+ iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval number of iterations between log messages]]>
+      </doc>
+    </method>
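
A small sketch of the polling variant with a log interval, assuming a counter that the AM's completion handling updates; the names and intervals are illustrative:

import java.util.concurrent.atomic.AtomicInteger;

import com.google.common.base.Supplier;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public final class WaitForCompletion {
  // Blocks until `completedContainers` reaches `expected`, checking every
  // 250 ms and logging "waiting in main loop" every 40 checks (~10 s).
  static void waitForAll(AMRMClient<ContainerRequest> rmClient,
      final AtomicInteger completedContainers, final int expected)
      throws InterruptedException {
    rmClient.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return completedContainers.get() >= expected;
      }
    }, 250, 40);
  }
}
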
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
+  <class name="AMRMClient.ContainerRequest" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints and
+ locality relaxation enabled.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.]]>
+      </doc>
+    </constructor>
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.
+ @param relaxLocality
+          If true, containers for this request may be assigned on hosts
+          and racks other than the ones explicitly requested.]]>
+      </doc>
+    </constructor>
+    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
+
+ @param capability
+          The {@link Resource} to be requested for each container.
+ @param nodes
+          Any hosts to request that the containers are placed on.
+ @param racks
+          Any racks to request that the containers are placed on. The
+          racks corresponding to any hosts requested will be automatically
+          added to this list.
+ @param priority
+          The priority at which to request the containers. Higher
+          priorities have lower numerical values.
+ @param relaxLocality
+          If true, containers for this request may be assigned on hosts
+          and racks other than the ones explicitly requested.
+ @param nodeLabelsExpression
+          Set node labels to allocate resource]]>
+      </doc>
+    </constructor>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodes" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRacks" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRelaxLocality" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Object to represent a single container request for resources. Scheduler
+ documentation should be consulted for the specifics of how the parameters
+ are honored.
+
+ By default, YARN schedulers try to allocate containers at the requested
+ locations but they may relax the constraints in order to expedite meeting
+ allocations limits. They first relax the constraint to the same rack as the
+ requested node and then to anywhere in the cluster. The relaxLocality flag
+ may be used to disable locality relaxation and request containers at only
+ specific locations. The following conditions apply.
+ <ul>
+ <li>Within a priority, all container requests must have the same value for
+ locality relaxation. Either enabled or disabled.</li>
+ <li>If locality relaxation is disabled, then across requests, locations at
+ different network levels may not be specified. E.g. it is invalid to make a
+ request for a specific node and another request for a specific rack.</li>
+ <li>If locality relaxation is disabled, then only within the same request,
+ a node and its rack may be specified together. This allows for a specific
+ rack with a preference for a specific node within that rack.</li>
+ </ul>
+ To re-enable locality relaxation at a given priority, all pending requests
+ with locality relaxation disabled must be first removed. Then they can be
+ added back with locality relaxation enabled.
+
+ All getters return immutable values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
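
As a brief illustration of the constructors documented above, the sketch below builds one request with the default (relaxed) locality and one with locality relaxation disabled. The resource size, priority value, and host/rack names are illustrative assumptions, not values defined by this API; the usual org.apache.hadoop.yarn.api.records and org.apache.hadoop.yarn.client.api imports are assumed.

  Resource capability = Resource.newInstance(1024, 1);  // 1 GB, 1 vcore (illustrative)
  Priority priority = Priority.newInstance(1);          // lower value = higher priority

  // Default behaviour: locality relaxation enabled, containers may be placed anywhere.
  AMRMClient.ContainerRequest anyNode =
      new AMRMClient.ContainerRequest(capability, null, null, priority);

  // Strict placement: containers are requested only at the named node and its rack.
  AMRMClient.ContainerRequest strict =
      new AMRMClient.ContainerRequest(capability,
          new String[] {"host1.example.com"},
          new String[] {"/rack1"},
          priority,
          false /* relaxLocality */);
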
+  <!-- start class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
+  <class name="InvalidContainerRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidContainerRequestException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidContainerRequestException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidContainerRequestException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Thrown when arguments are combined to construct an
+ <code>AMRMClient.ContainerRequest</code> in an invalid way.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
+  <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
+  <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="startContainer" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Start an allocated container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.</p>
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+                               <code>NodeManager</code> to launch the
+                               container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Stop a started container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Query the status of a container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupRunningContainersOnStop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Set whether the containers that are started by this client, and are
+ still running, should be stopped when the client stops. By default, the
+ feature is enabled.</p> However, containers will be stopped only
+ when the service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM Token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p/>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p/>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMClient -->
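
A minimal sketch of the NMClient lifecycle described above. It assumes 'conf' is a YarnConfiguration and that 'container' and 'launchContext' were obtained from an AMRMClient allocation (both are assumptions, not values defined here); exception handling and imports (java.util.Map, java.nio.ByteBuffer, the YARN records) are omitted for brevity.

  NMClient nmClient = NMClient.createNMClient();
  nmClient.init(conf);
  nmClient.start();

  // Launch the allocated container; the returned map holds auxiliary
  // service names and their outputs.
  Map<String, ByteBuffer> auxData =
      nmClient.startContainer(container, launchContext);

  // Query its status and, when done, stop the container and the client.
  ContainerStatus status =
      nmClient.getContainerStatus(container.getId(), container.getNodeId());
  nmClient.stopContainer(container.getId(), container.getNodeId());
  nmClient.stop();
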
+  <!-- start class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <class name="NMTokenCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a NM token cache instance.]]>
+      </doc>
+    </constructor>
+    <method name="getSingleton" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the singleton NM token cache.
+
+ @return the singleton NM token cache.]]>
+      </doc>
+    </method>
+    <method name="getNMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent. Only the singleton obtained from
+ {@link #getSingleton()} is looked at for the tokens. If you are using your
+ own NMTokenCache that is different from the singleton, use
+ {@link #getToken(String) }
+
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node manager]]>
+      </doc>
+    </method>
+    <method name="setNMToken"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address only in the singleton obtained from
+ {@link #getSingleton()}. If you are using your own NMTokenCache that is
+ different from the singleton, use {@link #setToken(String, Token) }
+
+ @param nodeAddr
+          node address (host:port)
+ @param token
+          NMToken]]>
+      </doc>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node
+         manager]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address
+ @param nodeAddr node address (host:port)
+ @param token NMToken]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NMTokenCache manages NMTokens required for an Application Master
+ communicating with individual NodeManagers.
+ <p/>
+ By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
+ {@link #getSingleton()} instance of the cache.
+ <ul>
+ <li>Using the singleton instance of the cache is appropriate when running a
+ single ApplicationMaster in the same JVM.</li>
+ <li>When using the singleton, users don't need to do anything special,
+ {@link AMRMClient} and {@link NMClient} are already set up to use the default
+ singleton {@link NMTokenCache}</li>
+ </ul>
+ <p/>
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+ <p/>
+ <ul>
+ <li>
+ If using the {@link AMRMClient} and the {@link NMClient}, setting up and using
+ an instance cache is as follows:
+ <p/>
+
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   ...
+ </pre>
+ </li>
+ <li>
+ If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, setting up
+ and using an instance cache is as follows:
+ <p/>
+
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+   ...
+ </pre>
+ </li>
+ <li>
+ If using {@link ApplicationMasterProtocol} and
+ {@link ContainerManagementProtocol} directly, setting up and using an
+ instance cache is as follows:
+ <p/>
+
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   ...
+   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+   ...
+   AllocateRequest allocateRequest = ...
+   ...
+   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+   for (NMToken token : allocateResponse.getNMTokens()) {
+     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+   }
+   ...
+   ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+   ...
+   nmPro.startContainer(container, containerContext);
+   ...
+ </pre>
+ </li>
+ </ul>
+ It is also possible to mix the usage of a client (<code>AMRMClient</code> or
+ <code>NMClient</code>, or the async versions of them) with a protocol proxy (
+ <code>ContainerManagementProtocolProxy</code> or
+ <code>ApplicationMasterProtocol</code>).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <class name="YarnClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of YarnClient.]]>
+      </doc>
+    </method>
+    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+ </p>
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Submit a new application to <code>YARN</code>. It is a blocking call - it
+ will not return {@link ApplicationId} until the application is successfully
+ submitted and accepted by the ResourceManager.
+ </p>
+
+ <p>
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+ </p>
+
+ <p>This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}</p>
+
+ @param appContext
+          {@link ApplicationSubmissionContext} containing all the details
+          needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by given ID.
+ </p>
+
+ @param applicationId
+          {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ </p>
+
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p/>
+ The AMRM token is required for AM to RM scheduling operations. For
+ managed Application Masters Yarn takes care of injecting it. For unmanaged
+ Applications Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p/>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Else this method returns NULL.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+ </p>
+
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+ </p>
+
+ @param states The {@link NodeState}s to filter on. If no filter states are
+          given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to YARN using that token.
+
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+         talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about a given <em>queue</em>.
+ </p>
+
+ @param queueName
+          Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy
+ </p>
+
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about top level queues.
+ </p>
+
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all the immediate child queues
+ of the given queue.
+ </p>
+
+ @param parent
+          Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+         of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information about <em>acls</em> for <em>current user</em> on all the
+ existing queues.
+ </p>
+
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+         <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws {@link ApplicationAttemptNotFoundException} if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (ApplicationAttempts) of the given Application in the cluster.
+ </p>
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws {@link ContainerNotFoundException} if container not found.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (Containers) of the given ApplicationAttempt in the cluster.
+ </p>
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempts
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Attempts to move the given application to the given queue.
+ </p>
+
+ @param appId
+    Application to move.
+ @param queue
+    Queue to move it to.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies
+ that the user's request can be fulfilled, and that it respects a sharing
+ policy (e.g., {@code CapacityOverTimePolicy}). Once it has positively
+ determined that the ReservationRequest is satisfiable, the {@code ResourceManager}
+ answers with a {@link ReservationSubmissionResponse} that includes a
+ {@link ReservationId}. Upon failure to find a valid allocation the response
+ is an exception with the message detailing the reason of failure.
+ </p>
+
+ <p>
+ The semantics guarantee that the {@link ReservationId} returned corresponds
+ to a valid reservation existing in the time range requested by the user.
+ The amount of capacity dedicated to such a reservation can vary over time,
+ depending on the allocation that has been determined, but it is guaranteed
+ to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+ </p>
+
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+         submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation can modify its definition.
+ </p>
+
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+ </p>
+
+ @param request to update an existing Reservation (the
+          {@link ReservationUpdateRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ </p>
+
+ @param request to remove an existing Reservation (the
+          {@link ReservationDeleteRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
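
To tie the three reservation calls together, the sketch below submits a reservation, then renegotiates it and finally deletes it. Building the ReservationSubmissionRequest (capacity, time range, gang constraints) is elided; the getReservationId() accessor on the response, the newInstance factory methods, and the 'yarnClient' and 'newDefinition' variables are assumptions for illustration, not guarantees of this API doc.

  // 'submissionRequest' is assumed to be a fully populated
  // ReservationSubmissionRequest; its construction is elided here.
  ReservationSubmissionResponse submitResponse =
      yarnClient.submitReservation(submissionRequest);
  ReservationId reservationId = submitResponse.getReservationId();  // assumed accessor

  // Renegotiate the reservation against a new ReservationDefinition ...
  yarnClient.updateReservation(
      ReservationUpdateRequest.newInstance(newDefinition, reservationId));

  // ... or remove it entirely when it is no longer needed.
  yarnClient.deleteReservation(
      ReservationDeleteRequest.newInstance(reservationId));
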
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node-to-labels mappings in the existing cluster
+ </p>
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node labels in the cluster
+ </p>
+
+ @return cluster node labels collection
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClient -->
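
A condensed sketch of the submission flow described above, from creating the client to polling the report. 'conf', the values set on the submission context (name, queue, resource), and 'amContainerSpec' are illustrative assumptions; exception handling and imports are omitted for brevity.

  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // Ask the RM for a new application and fill in its submission context.
  YarnClientApplication app = yarnClient.createApplication();
  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  appContext.setApplicationName("example-app");      // illustrative values
  appContext.setQueue("default");
  appContext.setAMContainerSpec(amContainerSpec);    // assumed prepared elsewhere
  appContext.setResource(Resource.newInstance(1024, 1));

  // Blocking submit, then poll the report for progress.
  ApplicationId appId = yarnClient.submitApplication(appContext);
  ApplicationReport report = yarnClient.getApplicationReport(appId);
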
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+  <class name="YarnClientApplication" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClientApplication" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse, org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNewApplicationResponse" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async">
+  <!-- start class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <class name="AMRMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+    </method>
+    <method name="setHeartbeatInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interval" type="int"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this application master with the resource manager. On successful
+ registration, starts the heartbeating thread.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called at the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request may have
+ already been sent to the ResourceManager, so even after the remove request
+ the app must be prepared to receive an allocation for the previous request.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the ResourceManager. If the app cannot use
+ a container or wants to give it up, it can release it. The app needs to
+ make new requests for the released resource capability if it still needs
+ it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms. In the main loop, this method will log
+ the message "waiting in main loop" every <code>logInterval</code>
+ iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval number of <code>check</code> iterations between log messages]]>
+      </doc>
+    </method>
+    <field name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="handler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="heartbeatIntervalMs" type="java.util.concurrent.atomic.AtomicInteger"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<code>AMRMClientAsync</code> handles communication with the ResourceManager
+ and provides asynchronous updates on events such as container allocations and
+ completions.  It contains a thread that sends periodic heartbeats to the
+ ResourceManager.
+
+ It should be used by implementing a CallbackHandler:
+ <pre>
+ {@code
+ class MyCallbackHandler implements AMRMClientAsync.CallbackHandler {
+   public void onContainersAllocated(List<Container> containers) {
+     [run tasks on the containers]
+   }
+
+   public void onContainersCompleted(List<ContainerStatus> statuses) {
+     [update progress, check whether app is done]
+   }
+
+   public void onNodesUpdated(List<NodeReport> updated) {}
+
+   public void onReboot() {}
+ }
+ }
+ </pre>
+
+ The client's lifecycle should be managed similarly to the following:
+
+ <pre>
+ {@code
+ AMRMClientAsync asyncClient =
+     createAMRMClientAsync(appAttId, 1000, new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ RegisterApplicationMasterResponse response = asyncClient
+    .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
+       appMasterTrackingUrl);
+ asyncClient.addContainerRequest(containerRequest);
+ [... wait for application to complete]
+ asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
+ asyncClient.stop();
+ }
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
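
The waitFor variants above are not covered by the class-level example, so here is a small sketch. The 'done' flag is an assumed application-level AtomicBoolean that the CallbackHandler flips when all containers have completed; 'asyncClient' is the started AMRMClientAsync, and InterruptedException handling is omitted.

  final AtomicBoolean done = new AtomicBoolean(false);
  // ... the CallbackHandler sets done to true once all work has finished ...

  // Block until the flag is set, polling every second and logging
  // "waiting in main loop" every 10 checks.
  asyncClient.waitFor(new com.google.common.base.Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return done.get();
    }
  }, 1000, 10);
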
+  <!-- start interface org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler -->
+  <interface name="AMRMClientAsync.CallbackHandler"    abstract="true"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="onContainersCompleted"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statuses" type="java.util.List"/>
+      <doc>
+      <![CDATA[Called when the ResourceManager responds to a heartbeat with completed
+ containers. If the response contains both completed containers and
+ allocated containers, this will be called before containersAllocated.]]>
+      </doc>
+    </method>
+    <method name="onContainersAllocated"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containers" type="java.util.List"/>
+      <doc>
+      <![CDATA[Called when the ResourceManager responds to a heartbeat with allocated
+ containers. If the response contains both completed containers and
+ allocated containers, this will be called after containersCompleted.]]>
+      </doc>
+    </method>
+    <method name="onShutdownRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Called when the ResourceManager wants the ApplicationMaster to shut down,
+ e.g. for being out of sync. The ApplicationMaster should not unregister
+ with the RM unless it wants to be the last attempt.]]>
+      </doc>
+    </method>
+    <method name="onNodesUpdated"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updatedNodes" type="java.util.List"/>
+      <doc>
+      <![CDATA[Called when nodes tracked by the ResourceManager have changed in health,
+ availability etc.]]>
+      <

<TRUNCATED>
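
For context on the waitFor entry in the jdiff XML above: it blocks the ApplicationMaster's
main thread until a user-supplied check passes. A minimal, hedged sketch of how it is
typically driven (the task counter, target count, and intervals are illustrative assumptions,
not part of this commit):

    import java.util.concurrent.atomic.AtomicInteger;
    import com.google.common.base.Supplier;
    import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

    public final class WaitForSketch {
      /** Block until 'completed' reaches 'totalTasks', checking once per second. */
      public static void awaitCompletion(AMRMClientAsync<?> asyncClient,
          final AtomicInteger completed, final int totalTasks)
          throws InterruptedException {
        asyncClient.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            // user defined checker: true once all tasks are done
            return completed.get() >= totalTasks;
          }
        }, 1000, 10); // poll every 1000 ms, log "waiting in main loop" every 10 checks
      }
    }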



[16/25] hadoop git commit: YARN-5197. RM leaks containers if running container disappears from node update. Contributed by Jason Lowe.

Posted by ae...@apache.org.
YARN-5197. RM leaks containers if running container disappears from node update. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0f4620c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0f4620c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0f4620c

Branch: refs/heads/HDFS-1312
Commit: e0f4620cc7db3db4b781e6042ab7dd754af28f18
Parents: 8a1dcce
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sat Jun 11 10:22:27 2016 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Sat Jun 11 10:22:27 2016 +0530

----------------------------------------------------------------------
 .../resourcemanager/rmnode/RMNodeImpl.java      | 37 +++++++++++++
 .../yarn/server/resourcemanager/MockNM.java     | 57 ++++++++++++++------
 .../resourcemanager/TestRMNodeTransitions.java  | 44 +++++++++++++++
 3 files changed, 121 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f4620c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 4b65675..a3a6b30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -24,6 +24,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -72,6 +73,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.AllocationExpirationInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
@@ -1311,6 +1313,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
         new ArrayList<ContainerStatus>();
     List<ContainerStatus> completedContainers =
         new ArrayList<ContainerStatus>();
+    int numRemoteRunningContainers = 0;
     for (ContainerStatus remoteContainer : containerStatuses) {
       ContainerId containerId = remoteContainer.getContainerId();
 
@@ -1344,6 +1347,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
       if (remoteContainer.getState() == ContainerState.RUNNING) {
         // Process only GUARANTEED containers in the RM.
         if (remoteContainer.getExecutionType() == ExecutionType.GUARANTEED) {
+          ++numRemoteRunningContainers;
           if (!launchedContainers.contains(containerId)) {
             // Just launched container. RM knows about it the first time.
             launchedContainers.add(containerId);
@@ -1366,12 +1370,45 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
         completedContainers.add(remoteContainer);
       }
     }
+    completedContainers.addAll(findLostContainers(
+          numRemoteRunningContainers, containerStatuses));
+
     if (newlyLaunchedContainers.size() != 0 || completedContainers.size() != 0) {
       nodeUpdateQueue.add(new UpdatedContainerInfo(newlyLaunchedContainers,
           completedContainers));
     }
   }
 
+  private List<ContainerStatus> findLostContainers(int numRemoteRunning,
+      List<ContainerStatus> containerStatuses) {
+    if (numRemoteRunning >= launchedContainers.size()) {
+      return Collections.emptyList();
+    }
+    Set<ContainerId> nodeContainers =
+        new HashSet<ContainerId>(numRemoteRunning);
+    List<ContainerStatus> lostContainers = new ArrayList<ContainerStatus>(
+        launchedContainers.size() - numRemoteRunning);
+    for (ContainerStatus remoteContainer : containerStatuses) {
+      if (remoteContainer.getState() == ContainerState.RUNNING
+          && remoteContainer.getExecutionType() == ExecutionType.GUARANTEED) {
+        nodeContainers.add(remoteContainer.getContainerId());
+      }
+    }
+    Iterator<ContainerId> iter = launchedContainers.iterator();
+    while (iter.hasNext()) {
+      ContainerId containerId = iter.next();
+      if (!nodeContainers.contains(containerId)) {
+        String diag = "Container " + containerId
+            + " was running but not reported from " + nodeId;
+        LOG.warn(diag);
+        lostContainers.add(SchedulerUtils.createAbnormalContainerStatus(
+            containerId, diag));
+        iter.remove();
+      }
+    }
+    return lostContainers;
+  }
+
   private void handleLogAggregationStatus(
       List<LogAggregationReport> logAggregationReportsForApps) {
     for (LogAggregationReport report : logAggregationReportsForApps) {
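
In plain terms, the findLostContainers() method added above reconciles the RM's view of
launched containers with what the node actually reports in its heartbeat: any GUARANTEED
container the RM still tracks but the node no longer lists as RUNNING is treated as lost and
reported as an abnormally completed (ABORTED) container. A minimal, hedged sketch of that
reconciliation, using plain String ids rather than the real YARN types:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;

    final class LostContainerSketch {
      /**
       * @param tracked  ids the RM believes are running on this node (mutated in place)
       * @param reported ids the node reported as RUNNING in the current heartbeat
       * @return ids that were tracked but are no longer reported; each is also removed
       *         from {@code tracked}, mirroring the iterator.remove() in the patch
       */
      static List<String> findLost(Set<String> tracked, Set<String> reported) {
        if (reported.size() >= tracked.size()) {
          return Collections.emptyList(); // fast path, as in the patch
        }
        List<String> lost = new ArrayList<String>();
        for (Iterator<String> it = tracked.iterator(); it.hasNext();) {
          String id = it.next();
          if (!reported.contains(id)) {
            lost.add(id); // in the patch this becomes an ABORTED ContainerStatus
            it.remove();
          }
        }
        return lost;
      }
    }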

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f4620c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
index 04ea51c..2e2bef7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -57,6 +58,8 @@ public class MockNM {
   private MasterKey currentContainerTokenMasterKey;
   private MasterKey currentNMTokenMasterKey;
   private String version;
+  private Map<ContainerId, ContainerStatus> containerStats =
+      new HashMap<ContainerId, ContainerStatus>();
 
   public MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTracker) {
     // scale vcores based on the requested memory
@@ -106,14 +109,12 @@ public class MockNM {
   }
 
   public void containerIncreaseStatus(Container container) throws Exception {
-    Map<ApplicationId, List<ContainerStatus>> conts = new HashMap<>();
     ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
         container.getId(), ContainerState.RUNNING, "Success", 0,
             container.getResource());
-    conts.put(container.getId().getApplicationAttemptId().getApplicationId(),
-        Collections.singletonList(containerStatus));
     List<Container> increasedConts = Collections.singletonList(container);
-    nodeHeartbeat(conts, increasedConts, true, ++responseId);
+    nodeHeartbeat(Collections.singletonList(containerStatus), increasedConts,
+        true, ++responseId);
   }
 
   public RegisterNodeManagerResponse registerNode() throws Exception {
@@ -147,18 +148,27 @@ public class MockNM {
       memory = (int) newResource.getMemorySize();
       vCores = newResource.getVirtualCores();
     }
+    containerStats.clear();
+    if (containerReports != null) {
+      for (NMContainerStatus report : containerReports) {
+        if (report.getContainerState() != ContainerState.COMPLETE) {
+          containerStats.put(report.getContainerId(),
+              ContainerStatus.newInstance(report.getContainerId(),
+                  report.getContainerState(), report.getDiagnostics(),
+                  report.getContainerExitStatus()));
+        }
+      }
+    }
     return registrationResponse;
   }
 
   public NodeHeartbeatResponse nodeHeartbeat(boolean isHealthy) throws Exception {
-    return nodeHeartbeat(new HashMap<ApplicationId, List<ContainerStatus>>(),
-        isHealthy, ++responseId);
+    return nodeHeartbeat(Collections.<ContainerStatus>emptyList(),
+        Collections.<Container>emptyList(), isHealthy, ++responseId);
   }
 
   public NodeHeartbeatResponse nodeHeartbeat(ApplicationAttemptId attemptId,
       long containerId, ContainerState containerState) throws Exception {
-    HashMap<ApplicationId, List<ContainerStatus>> nodeUpdate =
-        new HashMap<ApplicationId, List<ContainerStatus>>(1);
     ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
         BuilderUtils.newContainerId(attemptId, containerId), containerState,
         "Success", 0, BuilderUtils.newResource(memory, vCores));
@@ -166,8 +176,8 @@ public class MockNM {
         new ArrayList<ContainerStatus>(1);
     containerStatusList.add(containerStatus);
     Log.info("ContainerStatus: " + containerStatus);
-    nodeUpdate.put(attemptId.getApplicationId(), containerStatusList);
-    return nodeHeartbeat(nodeUpdate, true);
+    return nodeHeartbeat(containerStatusList,
+        Collections.<Container>emptyList(), true, ++responseId);
   }
 
   public NodeHeartbeatResponse nodeHeartbeat(Map<ApplicationId,
@@ -177,19 +187,32 @@ public class MockNM {
 
   public NodeHeartbeatResponse nodeHeartbeat(Map<ApplicationId,
       List<ContainerStatus>> conts, boolean isHealthy, int resId) throws Exception {
-    return nodeHeartbeat(conts, new ArrayList<Container>(), isHealthy, resId);
+    ArrayList<ContainerStatus> updatedStats = new ArrayList<ContainerStatus>();
+    for (List<ContainerStatus> stats : conts.values()) {
+      updatedStats.addAll(stats);
+    }
+    return nodeHeartbeat(updatedStats, Collections.<Container>emptyList(),
+        isHealthy, resId);
   }
 
-  public NodeHeartbeatResponse nodeHeartbeat(Map<ApplicationId,
-      List<ContainerStatus>> conts, List<Container> increasedConts,
-          boolean isHealthy, int resId) throws Exception {
+  public NodeHeartbeatResponse nodeHeartbeat(List<ContainerStatus> updatedStats,
+      List<Container> increasedConts, boolean isHealthy, int resId)
+          throws Exception {
     NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
     NodeStatus status = Records.newRecord(NodeStatus.class);
     status.setResponseId(resId);
     status.setNodeId(nodeId);
-    for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
-      Log.info("entry.getValue() " + entry.getValue());
-      status.setContainersStatuses(entry.getValue());
+    ArrayList<ContainerId> completedContainers = new ArrayList<ContainerId>();
+    for (ContainerStatus stat : updatedStats) {
+      if (stat.getState() == ContainerState.COMPLETE) {
+        completedContainers.add(stat.getContainerId());
+      }
+      containerStats.put(stat.getContainerId(), stat);
+    }
+    status.setContainersStatuses(
+        new ArrayList<ContainerStatus>(containerStats.values()));
+    for (ContainerId cid : completedContainers) {
+      containerStats.remove(cid);
     }
     status.setIncreasedContainers(increasedConts);
     NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0f4620c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 16fe998..83a7c73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -34,6 +34,7 @@ import java.util.Random;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -1021,4 +1022,47 @@ public class TestRMNodeTransitions {
     Resource originalCapacity = node.getOriginalTotalCapability();
     assertEquals("Original total capability not null after recommission", null, originalCapacity);
   }
+
+  @Test
+  public void testDisappearingContainer() {
+    ContainerId cid1 = BuilderUtils.newContainerId(
+        BuilderUtils.newApplicationAttemptId(
+            BuilderUtils.newApplicationId(1, 1), 1), 1);
+    ContainerId cid2 = BuilderUtils.newContainerId(
+        BuilderUtils.newApplicationAttemptId(
+            BuilderUtils.newApplicationId(2, 2), 2), 2);
+    ArrayList<ContainerStatus> containerStats =
+        new ArrayList<ContainerStatus>();
+    containerStats.add(ContainerStatus.newInstance(cid1,
+        ContainerState.RUNNING, "", -1));
+    containerStats.add(ContainerStatus.newInstance(cid2,
+        ContainerState.RUNNING, "", -1));
+    node = getRunningNode();
+    node.handle(getMockRMNodeStatusEvent(containerStats));
+    assertEquals("unexpected number of running containers",
+        2, node.getLaunchedContainers().size());
+    Assert.assertTrue("first container not running",
+        node.getLaunchedContainers().contains(cid1));
+    Assert.assertTrue("second container not running",
+        node.getLaunchedContainers().contains(cid2));
+    assertEquals("already completed containers",
+        0, completedContainers.size());
+    containerStats.remove(0);
+    node.handle(getMockRMNodeStatusEvent(containerStats));
+    assertEquals("expected one container to be completed",
+        1, completedContainers.size());
+    ContainerStatus cs = completedContainers.get(0);
+    assertEquals("first container not the one that completed",
+        cid1, cs.getContainerId());
+    assertEquals("completed container not marked complete",
+        ContainerState.COMPLETE, cs.getState());
+    assertEquals("completed container not marked aborted",
+        ContainerExitStatus.ABORTED, cs.getExitStatus());
+    Assert.assertTrue("completed container not marked missing",
+        cs.getDiagnostics().contains("not reported"));
+    assertEquals("unexpected number of running containers",
+        1, node.getLaunchedContainers().size());
+    Assert.assertTrue("second container not running",
+        node.getLaunchedContainers().contains(cid2));
+  }
 }




[15/25] hadoop git commit: HADOOP-13213. Small Documentation bug with AuthenticatedURL in hadoop-auth. Contributed by Tom Ellis.

Posted by ae...@apache.org.
HADOOP-13213. Small Documentation bug with AuthenticatedURL in hadoop-auth. Contributed by Tom Ellis.

This closes #97.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a1dccec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a1dccec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a1dccec

Branch: refs/heads/HDFS-1312
Commit: 8a1dcceccea5cfe9fb0c94daa2517cd64a16b3c0
Parents: 0bbb4dd
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sat Jun 11 03:31:06 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sat Jun 11 03:32:21 2016 +0900

----------------------------------------------------------------------
 .../hadoop/security/authentication/client/AuthenticatedURL.java  | 4 ++--
 hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1dccec/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index f87d9d8..6604c3f 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -44,14 +44,14 @@ import java.util.Map;
  * URL url = new URL("http://foo:8080/bar");
  * AuthenticatedURL.Token token = new AuthenticatedURL.Token();
  * AuthenticatedURL aUrl = new AuthenticatedURL();
- * HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
+ * HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
  * ....
  * // use the 'conn' instance
  * ....
  *
  * // establishing a follow up connection using a token from the previous connection
  *
- * HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
+ * HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
  * ....
  * // use the 'conn' instance
  * ....

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1dccec/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md b/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md
index 7efb642..4dad79d 100644
--- a/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md
+++ b/hadoop-common-project/hadoop-auth/src/site/markdown/Examples.md
@@ -57,9 +57,9 @@ Use the `AuthenticatedURL` class to obtain an authenticated HTTP connection:
     URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who");
     AuthenticatedURL.Token token = new AuthenticatedURL.Token();
     ...
-    HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
+    HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
     ...
-    conn = new AuthenticatedURL(url, token).openConnection();
+    conn = new AuthenticatedURL().openConnection(url, token);
     ...
 
 Building and Running the Examples
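
Since the fix only touches documentation, a self-contained version of the corrected usage may
help; this is a hedged sketch (the URL and printed output are illustrative), showing the
openConnection(url, token) form that the updated docs describe:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

    public final class AuthenticatedURLSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who");
        AuthenticatedURL.Token token = new AuthenticatedURL.Token();
        // first request: authenticates and populates the token
        HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
        System.out.println("HTTP " + conn.getResponseCode());
        // follow-up request: reuses the token obtained above
        conn = new AuthenticatedURL().openConnection(url, token);
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }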




[05/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
new file mode 100644
index 0000000..1a1d88b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
@@ -0,0 +1,1801 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu May 12 17:47:55 PDT 2016 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-server-common 2.7.2"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vi
 nodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.ja
 r:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanut
 ils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/re
 pository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/
 repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/exten
 sions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3
 .6.2.Final.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname hadoop-yarn-server-common 2.7.2 -->
+<package name="org.apache.hadoop.yarn.server">
+  <!-- start class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
+  <class name="RMNMSecurityInfoClass" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMNMSecurityInfoClass"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
+  <interface name="ResourceManagerConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="RM_INVALID_IDENTIFIER" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The invalid identifier of the ResourceManager, used as the default value
+ when initializing the RM identifier. Currently the RM uses a timestamp
+ as its identifier.]]>
+      </doc>
+    </field>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
+  <interface name="ResourceTracker"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
+  <interface name="ResourceTrackerPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB -->
+  <interface name="SCMUploaderProtocolPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.proto.SCMUploaderProtocol.SCMUploaderProtocolService.BlockingInterface"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
+  <class name="ServerRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Server protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified server protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
+  <class name="ResourceTrackerPBClientImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.ResourceTracker"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="ResourceTrackerPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMUploaderProtocolPBClientImpl -->
+  <class name="SCMUploaderProtocolPBClientImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.SCMUploaderProtocol"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="SCMUploaderProtocolPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="notify" return="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="canUpload" return="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMUploaderProtocolPBClientImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
+  <class name="ResourceTrackerPBServiceImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.ResourceTrackerPB"/>
+    <constructor name="ResourceTrackerPBServiceImpl" type="org.apache.hadoop.yarn.server.api.ResourceTracker"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMUploaderProtocolPBServiceImpl -->
+  <class name="SCMUploaderProtocolPBServiceImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB"/>
+    <constructor name="SCMUploaderProtocolPBServiceImpl" type="org.apache.hadoop.yarn.server.api.SCMUploaderProtocol"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="notify" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+    <method name="canUpload" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMUploaderProtocolPBServiceImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records">
+  <!-- start interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
+  <interface name="MasterKey"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeyId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keyId" type="int"/>
+    </method>
+    <method name="getBytes" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setBytes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="java.nio.ByteBuffer"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeAction -->
+  <class name="NodeAction" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.server.api.records.NodeAction[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.server.api.records.NodeAction"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[The NodeManager is instructed to perform the given action.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeAction -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <class name="NodeHealthStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is the node healthy?
+ @return <code>true</code> if the node is healthy, else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code NodeHealthStatus} is a summary of the health status of the node.
+ <p>
+ It includes information such as:
+ <ul>
+   <li>
+     An indicator of whether the node is healthy, as determined by the
+     health-check script.
+   </li>
+   <li>The previous time at which the health status was reported.</li>
+   <li>A diagnostic report on the health status.</li>
+ </ul>
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
+  <class name="NodeStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.server.api.records.NodeStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="responseId" type="int"/>
+      <param name="containerStatuses" type="java.util.List"/>
+      <param name="keepAliveApplications" type="java.util.List"/>
+      <param name="nodeHealthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainersStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainersStatuses"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersStatuses" type="java.util.List"/>
+    </method>
+    <method name="getKeepAliveApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeepAliveApplications"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIds" type="java.util.List"/>
+    </method>
+    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeHealthStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+    <method name="setNodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="setResponseId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
+  <class name="MasterKeyPBImpl" extends="org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+    <constructor name="MasterKeyPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="MasterKeyPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeyId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+    </method>
+    <method name="getBytes" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setBytes"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="java.nio.ByteBuffer"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
+  <class name="NodeHealthStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatusPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NodeHealthStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setIsNodeHealthy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isNodeHealthy" type="boolean"/>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHealthReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthReport" type="java.lang.String"/>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setLastHealthReportTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="lastHealthReport" type="long"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
+  <class name="NodeStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeStatusPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NodeStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResponseId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getContainersStatuses" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainersStatuses"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containers" type="java.util.List"/>
+    </method>
+    <method name="getKeepAliveApplications" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeepAliveApplications"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIds" type="java.util.List"/>
+    </method>
+    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeHealthStatus"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
+  <!-- start class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
+  <class name="VersionPBImpl" extends="org.apache.hadoop.yarn.server.records.Version"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="VersionPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="VersionPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMajorVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMajorVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="major" type="int"/>
+    </method>
+    <method name="getMinorVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMinorVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="minor" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.security.http">
+  <!-- start class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
+  <class name="RMAuthenticationFilterInitializer" extends="org.apache.hadoop.http.FilterInitializer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMAuthenticationFilterInitializer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createFilterConfig" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="initFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.http.FilterContainer"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
+</package>
+<package name="org.apache.hadoop.yarn.server.sharedcache">
+</package>
+<package name="org.apache.hadoop.yarn.server.utils">
+  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
+  <class name="BuilderUtils.ApplicationIdComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="BuilderUtils.ApplicationIdComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a1" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="a2" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
+  <class name="BuilderUtils.ContainerIdComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="BuilderUtils.ContainerIdComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c1" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="c2" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <class name="LeveldbIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Iterator"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB, org.iq80.leveldb.ReadOptions"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DBIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator using the specified underlying DBIterator]]>
+      </doc>
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so the key of the next BlockElement
+ returned is greater than or equal to the specified targetKey.]]>
+      </doc>
+    </method>
+    <method name="seekToFirst"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the beginning of the Database.]]>
+      </doc>
+    </method>
+    <method name="seekToLast"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the end of the Database.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns <tt>true</tt> if the iteration has more elements.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekNext" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration, without advancing the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="hasPrev" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return true if there is a previous entry in the iteration.]]>
+      </doc>
+    </method>
+    <method name="prev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration and rewinds the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekPrev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration, without rewinding the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Removes from the database the last element returned by the iterator.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes the iterator.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for a DBIterator to translate the raw RuntimeExceptions that
+ can be thrown into DBExceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.Lock -->
+  <class name="Lock"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Annotation to document locking order.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.Lock -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
+  <class name="Lock.NoLock" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Lock.NoLock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
+  <class name="YarnServerBuilderUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnServerBuilderUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newNodeHeartbeatResponse" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="action" type="org.apache.hadoop.yarn.server.api.records.NodeAction"/>
+      <param name="containersToCleanUp" type="java.util.List"/>
+      <param name="applicationsToCleanUp" type="java.util.List"/>
+      <param name="containerTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+      <param name="nmTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+      <param name="nextHeartbeatInterval" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[Server Builder utilities to construct various objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
+  <class name="AppAttemptBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <method name="generateOverview"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
+      <param name="containers" type="java.util.Collection"/>
+      <param name="appAttempt" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
+      <param name="node" type="java.lang.String"/>
+    </method>
+    <method name="hasAMContainer" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="containers" type="java.util.Collection"/>
+    </method>
+    <method name="createAttemptHeadRoomTable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <method name="createTablesForAttemptMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppBlock -->
+  <class name="AppBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <method name="generateApplicationTable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+      <param name="callerUGI" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="attempts" type="java.util.Collection"/>
+    </method>
+    <method name="createApplicationMetricsTable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appID" type="org.apache.hadoop.yarn.api.records.ApplicationId"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
+  <class name="AppsBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppsBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="fetchData"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <method name="renderData"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="reqAppStates" type="java.util.EnumSet"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="callerUGI" type="org.apache.hadoop.security.UserGroupInformation"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appReports" type="java.util.Collection"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
+  <class name="ContainerBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.WebPageUtils -->
+  <class name="WebPageUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WebPageUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="appsTableInit" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="appsTableInit" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isFairSchedulerPage" type="boolean"/>
+    </method>
+    <method name="attemptsTableInit" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="containersTableInit" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.WebPageUtils -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.WebServices -->
+  <class name="WebServices" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WebServices" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApps" return="org.apache.hadoop.yarn.server.webapp.dao.AppsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="stateQuery" type="java.lang.String"/>
+      <param name="statesQuery" type="java.util.Set"/>
+      <param name="finalStatusQuery" type="java.lang.String"/>
+      <param name="userQuery" type="java.lang.String"/>
+      <param name="queueQuery" type="java.lang.String"/>
+      <param name="count" type="java.lang.String"/>
+      <param name="startedBegin" type="java.lang.String"/>
+      <param name="startedEnd" type="java.lang.String"/>
+      <param name="finishBegin" type="java.lang.String"/>
+      <param name="finishEnd" type="java.lang.String"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+    </method>
+    <method name="getApp" return="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="getAppAttempts" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="getAppAttempt" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="getContainers" return="org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="getContainer" return="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+      <param name="containerId" type="java.lang.String"/>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+    </method>
+    <method name="parseQueries" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="queries" type="java.util.Set"/>
+      <param name="isState" type="boolean"/>
+    </method>
+    <method name="parseApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="parseApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="parseContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="containerId" type="java.lang.String"/>
+    </method>
+    <method name="validateIds"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+    </method>
+    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.WebServices -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp.dao">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <class name="AppAttemptInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppAttemptInfo" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptState" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="amContainerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <class name="AppAttemptsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
+    </method>
+    <method name="getAttempts" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="attempt" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <class name="AppInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppInfo" type="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppState" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinalAppStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getSubmittedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationTags" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="currentAppAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="user" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="name" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="queue" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="type" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appState" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="float"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="submittedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="applicationTags" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <class name="AppsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appinfo" type="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"/>
+    </method>
+    <method name="getApps" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="app" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <class name="ContainerInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerInfo" type="org.apache.hadoop.yarn.api.records.ContainerReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMB" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedVCores" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAssignedNodeId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprec

<TRUNCATED>
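
The LeveldbIterator class documented in the JDiff listing above wraps a raw
org.iq80.leveldb.DBIterator so that its RuntimeExceptions surface as
DBExceptions. A minimal usage sketch follows; it is illustrative only and not
part of the patch. The Iq80DBFactory-based open call, the database path, and
the printed output are assumptions, while the LeveldbIterator constructor and
methods match the API listed above.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class LeveldbIteratorSketch {
  public static void main(String[] args) throws IOException {
    // Opening the DB directly is an assumption for this sketch; YARN services
    // normally obtain their handle through their own leveldb state stores.
    DB db = Iq80DBFactory.factory.open(new File("/tmp/sample-leveldb"),
        new Options().createIfMissing(true));

    LeveldbIterator iter = new LeveldbIterator(db);
    try {
      // Position at the first entry and walk the database in key order.
      iter.seekToFirst();
      while (iter.hasNext()) {
        Map.Entry<byte[], byte[]> entry = iter.next();
        System.out.println(new String(entry.getKey(), StandardCharsets.UTF_8)
            + " -> " + entry.getValue().length + " value bytes");
      }
    } catch (DBException e) {
      // The wrapper translates the iterator's raw RuntimeExceptions into
      // DBExceptions, so a single catch covers iteration failures.
      throw new IOException("LevelDB iteration failed", e);
    } finally {
      iter.close();
      db.close();
    }
  }
}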

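The same JDiff listing also covers YarnServerBuilderUtils.newNodeHeartbeatResponse
and the MasterKeyPBImpl record. A short sketch of assembling a heartbeat response
with them follows; the key ids, secret bytes, the NodeAction.NORMAL choice and the
1000 ms interval are illustrative assumptions, while the method and record
signatures follow the listing above.

import java.nio.ByteBuffer;
import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils;

public class HeartbeatResponseSketch {
  public static void main(String[] args) {
    // Master keys are normally rolled by the RM's secret managers; the ids and
    // bytes used here are placeholders.
    MasterKey containerTokenKey = new MasterKeyPBImpl();
    containerTokenKey.setKeyId(1);
    containerTokenKey.setBytes(ByteBuffer.wrap(new byte[] {0x01, 0x02}));

    MasterKey nmTokenKey = new MasterKeyPBImpl();
    nmTokenKey.setKeyId(2);
    nmTokenKey.setBytes(ByteBuffer.wrap(new byte[] {0x03, 0x04}));

    NodeHeartbeatResponse response =
        YarnServerBuilderUtils.newNodeHeartbeatResponse(
            42,                                       // responseId
            NodeAction.NORMAL,                        // action
            Collections.<ContainerId>emptyList(),     // containersToCleanUp
            Collections.<ApplicationId>emptyList(),   // applicationsToCleanUp
            containerTokenKey,
            nmTokenKey,
            1000L);                                   // nextHeartbeatInterval
    System.out.println("responseId=" + response.getResponseId());
  }
}
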
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
new file mode 100644
index 0000000..094962f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
@@ -0,0 +1,2059 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Wed Apr 08 11:30:15 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-server-common 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-common/2.6.0/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users
 /llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Use
 rs/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-log4j12/1.
 7.5/slf4j-log4j12-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/org/apache/hadoop/hadoop-auth/2.6.0/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache
 /directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/org/htrace/htrace-core/3.0.4/htrace-core-3.0.4.jar:/Users/llu/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/llu/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.6.0.jar:/Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.6.0.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:
 /Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/llu/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/llu/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/llu/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/llu/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/reposi
 tory/org/apache/hadoop/hadoop-annotations/2.6.0/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/llu/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/llu/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -apidir /Users/llu/hadoop2_6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname hadoop-yarn-server-common 2.6.0 -->
+<package name="org.apache.hadoop.yarn.server">
+  <!-- start class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
+  <class name="RMNMSecurityInfoClass" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMNMSecurityInfoClass"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+  <!-- start interface org.apache.hadoop.yarn.server.api.ApplicationContext -->
+  <interface name="ApplicationContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getApplication" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns Application {@link ApplicationReport} for the specified
+ {@link ApplicationId}.
+
+ @param appId
+
+ @return {@link ApplicationReport} for the ApplicationId.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllApplications" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns all Application {@link ApplicationReport}s
+
+ @return map of {@link ApplicationId} to {@link ApplicationReport}s.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Application can have multiple application attempts
+ {@link ApplicationAttemptReport}. This method returns the all
+ {@link ApplicationAttemptReport}s for the Application.
+
+ @param appId
+
+ @return all {@link ApplicationAttemptReport}s for the Application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempt" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns {@link ApplicationAttemptReport} for specified
+ {@link ApplicationId}.
+
+ @param appAttemptId
+          {@link ApplicationAttemptId}
+ @return {@link ApplicationAttemptReport} for ApplicationAttemptId
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainer" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns {@link ContainerReport} for specified
+ {@link ContainerId}.
+
+ @param containerId
+          {@link ContainerId}
+ @return {@link ContainerReport} for ContainerId
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMContainer" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns {@link ContainerReport} for specified
+ {@link ApplicationAttemptId}.
+
+ @param appAttemptId
+          {@link ApplicationAttemptId}
+ @return {@link ContainerReport} for ApplicationAttemptId
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns Map of {@link ContainerId} to {@link ContainerReport}
+ for specified {@link ApplicationAttemptId}.
+
+ @param appAttemptId
+          {@link ApplicationAttemptId}
+ @return Map of {@link ContainerId} to {@link ContainerReport} for
+         ApplicationAttemptId
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ApplicationContext -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
+  <interface name="ResourceManagerConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="RM_INVALID_IDENTIFIER" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This states the invalid identifier of Resource Manager. This is used as a
+ default value for initializing RM identifier. Currently, RM is using time
+ stamp as RM identifier.]]>
+      </doc>
+    </field>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
+  <interface name="ResourceTracker"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
+  <interface name="ResourceTrackerPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
+  <class name="ServerRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Server protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified server protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
+  <class name="ResourceTrackerPBClientImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.ResourceTracker"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="ResourceTrackerPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
+  <class name="ResourceTrackerPBServiceImpl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.ResourceTrackerPB"/>
+    <constructor name="ResourceTrackerPBServiceImpl" type="org.apache.hadoop.yarn.server.api.ResourceTracker"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="registerNodeManager" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="controller" type="com.google.protobuf.RpcController"/>
+      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto"/>
+      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records">
+  <!-- start interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
+  <interface name="MasterKey"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeyId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keyId" type="int"/>
+    </method>
+    <method name="getBytes" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setBytes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="java.nio.ByteBuffer"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeAction -->
+  <class name="NodeAction" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.server.api.records.NodeAction[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.server.api.records.NodeAction"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[The NodeManager is instructed to perform the given action.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeAction -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <class name="NodeHealthStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is the node healthy?
+ @return <code>true</code> if the node is healthy, else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p><code>NodeHealthStatus</code> is a summary of the health status of the
+ node.</p>
+
+ <p>It includes information such as:
+   <ul>
+     <li>
+       An indicator of whether the node is healthy, as determined by the
+       health-check script.
+     </li>
+     <li>The previous time at which the health status was reported.</li>
+     <li>A diagnostic report on the health status.</li>
+   </ul>
+ </p>
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
+  <class name="NodeStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.server.api.records.NodeStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="responseId" type="int"/>
+      <param name="containerStatuses" type="java.util.List"/>
+      <param name="keepAliveApplications" type="java.util.List"/>
+      <param name="nodeHealthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainersStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainersStatuses"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersStatuses" type="java.util.List"/>
+    </method>
+    <method name="getKeepAliveApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeepAliveApplications"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIds" type="java.util.List"/>
+    </method>
+    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeHealthStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+    <method name="setNodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="setResponseId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
+  <class name="MasterKeyPBImpl" extends="org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+    <constructor name="MasterKeyPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="MasterKeyPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeyId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+    </method>
+    <method name="getBytes" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setBytes"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="java.nio.ByteBuffer"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
+  <class name="NodeHealthStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatusPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NodeHealthStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setIsNodeHealthy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isNodeHealthy" type="boolean"/>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHealthReport"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthReport" type="java.lang.String"/>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setLastHealthReportTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="lastHealthReport" type="long"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
+  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
+  <class name="NodeStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeStatusPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NodeStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResponseId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeId"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getContainersStatuses" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainersStatuses"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containers" type="java.util.List"/>
+    </method>
+    <method name="getKeepAliveApplications" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setKeepAliveApplications"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIds" type="java.util.List"/>
+    </method>
+    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeHealthStatus"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
+  <!-- start class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
+  <class name="VersionPBImpl" extends="org.apache.hadoop.yarn.server.records.Version"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="VersionPBImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="VersionPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMajorVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMajorVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="major" type="int"/>
+    </method>
+    <method name="getMinorVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMinorVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="minor" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.server.security.http">
+  <!-- start class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
+  <class name="RMAuthenticationFilterInitializer" extends="org.apache.hadoop.http.FilterInitializer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMAuthenticationFilterInitializer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createFilterConfig" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="initFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.http.FilterContainer"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
+</package>
+<package name="org.apache.hadoop.yarn.server.utils">
+  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils -->
+  <class name="BuilderUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BuilderUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newLocalResource" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+    </method>
+    <method name="newLocalResource" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+    </method>
+    <method name="newApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recordFactory" type="org.apache.hadoop.yarn.factories.RecordFactory"/>
+      <param name="clustertimestamp" type="long"/>
+      <param name="id" type="java.lang.CharSequence"/>
+    </method>
+    <method name="newApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recordFactory" type="org.apache.hadoop.yarn.factories.RecordFactory"/>
+      <param name="clusterTimeStamp" type="long"/>
+      <param name="id" type="int"/>
+    </method>
+    <method name="newApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="clusterTimeStamp" type="long"/>
+      <param name="id" type="int"/>
+    </method>
+    <method name="newApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="attemptId" type="int"/>
+    </method>
+    <method name="convert" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="clustertimestamp" type="long"/>
+      <param name="id" type="java.lang.CharSequence"/>
+    </method>
+    <method name="newContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerId" type="long"/>
+    </method>
+    <method name="newContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="int"/>
+      <param name="appAttemptId" type="int"/>
+      <param name="timestamp" type="long"/>
+      <param name="id" type="long"/>
+    </method>
+    <method name="newContainerToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="user" type="java.lang.String"/>
+      <param name="r" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="expiryTime" type="long"/>
+      <param name="masterKeyId" type="int"/>
+      <param name="password" type="byte[]"/>
+      <param name="rmIdentifier" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="newContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recordFactory" type="org.apache.hadoop.yarn.factories.RecordFactory"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerId" type="int"/>
+    </method>
+    <method name="newNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+    </method>
+    <method name="newNodeReport" return="org.apache.hadoop.yarn.api.records.NodeReport"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="nodeState" type="org.apache.hadoop.yarn.api.records.NodeState"/>
+      <param name="httpAddress" type="java.lang.String"/>
+      <param name="rackName" type="java.lang.String"/>
+      <param name="used" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="healthReport" type="java.lang.String"/>
+      <param name="lastHealthReportTime" type="long"/>
+    </method>
+    <method name="newNodeReport" return="org.apache.hadoop.yarn.api.records.NodeReport"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="nodeState" type="org.apache.hadoop.yarn.api.records.NodeState"/>
+      <param name="httpAddress" type="java.lang.String"/>
+      <param name="rackName" type="java.lang.String"/>
+      <param name="used" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="healthReport" type="java.lang.String"/>
+      <param name="lastHealthReportTime" type="long"/>
+      <param name="nodeLabels" type="java.util.Set"/>
+    </method>
+    <method name="newContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="containerState" type="org.apache.hadoop.yarn.api.records.ContainerState"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="exitStatus" type="int"/>
+    </method>
+    <method name="newContainer" return="org.apache.hadoop.yarn.api.records.Container"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="nodeHttpAddress" type="java.lang.String"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="containerToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <method name="newToken" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tokenClass" type="java.lang.Class"/>
+      <param name="identifier" type="byte[]"/>
+      <param name="kind" type="java.lang.String"/>
+      <param name="password" type="byte[]"/>
+      <param name="service" type="java.lang.String"/>
+    </method>
+    <method name="newDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="byte[]"/>
+      <param name="kind" type="java.lang.String"/>
+      <param name="password" type="byte[]"/>
+      <param name="service" type="java.lang.String"/>
+    </method>
+    <method name="newClientToAMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="byte[]"/>
+      <param name="kind" type="java.lang.String"/>
+      <param name="password" type="byte[]"/>
+      <param name="service" type="java.lang.String"/>
+    </method>
+    <method name="newAMRMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="byte[]"/>
+      <param name="kind" type="java.lang.String"/>
+      <param name="password" type="byte[]"/>
+      <param name="service" type="java.lang.String"/>
+    </method>
+    <method name="newContainerTokenIdentifier" return="org.apache.hadoop.yarn.security.ContainerTokenIdentifier"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="newContainerLaunchContext" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="localResources" type="java.util.Map"/>
+      <param name="environment" type="java.util.Map"/>
+      <param name="commands" type="java.util.List"/>
+      <param name="serviceData" type="java.util.Map"/>
+      <param name="tokens" type="java.nio.ByteBuffer"/>
+      <param name="acls" type="java.util.Map"/>
+    </method>
+    <method name="newPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="int"/>
+    </method>
+    <method name="newResourceRequest" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="hostName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+    </method>
+    <method name="newResourceRequest" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="r" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+    </method>
+    <method name="newApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="user" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="host" type="java.lang.String"/>
+      <param name="rpcPort" type="int"/>
+      <param name="clientToAMToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <param name="state" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="url" type="java.lang.String"/>
+      <param name="startTime" type="long"/>
+      <param name="finishTime" type="long"/>
+      <param name="finalStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appResources" type="org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport"/>
+      <param name="origTrackingUrl" type="java.lang.String"/>
+      <param name="progress" type="float"/>
+      <param name="appType" type="java.lang.String"/>
+      <param name="amRmToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <param name="tags" type="java.util.Set"/>
+    </method>
+    <method name="newApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+    </method>
+    <method name="newApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="newApplicationResourceUsageReport" return="org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numUsedContainers" type="int"/>
+      <param name="numReservedContainers" type="int"/>
+      <param name="usedResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="reservedResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="neededResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="memorySeconds" type="long"/>
+      <param name="vcoreSeconds" type="long"/>
+    </method>
+    <method name="newResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="int"/>
+      <param name="vCores" type="int"/>
+    </method>
+    <method name="newURL" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scheme" type="java.lang.String"/>
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="file" type="java.lang.String"/>
+    </method>
+    <method name="newAllocateResponse" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+    </method>
+    <doc>
+    <![CDATA[Builder utilities to construct various objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
+  <class name="BuilderUtils.ApplicationIdComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="BuilderUtils.ApplicationIdComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a1" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="a2" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
+  <class name="BuilderUtils.ContainerIdComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="BuilderUtils.ContainerIdComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c1" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="c2" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <class name="LeveldbIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Iterator"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB, org.iq80.leveldb.ReadOptions"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DBIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator using the specified underlying DBIterator]]>
+      </doc>
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so the key of the next BlockElement
+ returned is greater than or equal to the specified targetKey.]]>
+      </doc>
+    </method>
+    <method name="seekToFirst"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the beginning of the Database.]]>
+      </doc>
+    </method>
+    <method name="seekToLast"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the end of the Database.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns <tt>true</tt> if the iteration has more elements.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekNext" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration, without advancing the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="hasPrev" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return true if there is a previous entry in the iteration.]]>
+      </doc>
+    </method>
+    <method name="prev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration and rewinds the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekPrev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration, without rewinding the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Removes from the database the last element returned by the iterator.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes the iterator.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for a DBIterator to translate the raw RuntimeExceptions that
+ can be thrown into DBExceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.Lock -->
+  <class name="Lock"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Annotation to document locking order.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.Lock -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
+  <class name="Lock.NoLock" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Lock.NoLock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
+  <!-- start class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
+  <class name="YarnServerBuilderUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnServerBuilderUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newNodeHeartbeatResponse" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="action" type="org.apache.hadoop.yarn.server.api.records.NodeAction"/>
+      <param name="containersToCleanUp" type="java.util.List"/>
+      <param name="applicationsToCleanUp" type="java.util.List"/>
+      <param name="containerTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+      <param name="nmTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
+      <param name="nextHeartbeatInterval" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[Server Builder utilities to construct various objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
+  <class name="AppAttemptBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptBlock" type="org.apache.hadoop.yarn.server.api.ApplicationContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppBlock -->
+  <class name="AppBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appContext" type="org.apache.hadoop.yarn.server.api.ApplicationContext"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
+  <class name="AppsBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+    <field name="appContext" type="org.apache.hadoop.yarn.server.api.ApplicationContext"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
+  <class name="ContainerBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerBlock" type="org.apache.hadoop.yarn.server.api.ApplicationContext, org.apache.hadoop.yarn.webapp.View.ViewContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="render"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.WebServices -->
+  <class name="WebServices" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WebServices" type="org.apache.hadoop.yarn.server.api.ApplicationContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApps" return="org.apache.hadoop.yarn.server.webapp.dao.AppsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="stateQuery" type="java.lang.String"/>
+      <param name="statesQuery" type="java.util.Set"/>
+      <param name="finalStatusQuery" type="java.lang.String"/>
+      <param name="userQuery" type="java.lang.String"/>
+      <param name="queueQuery" type="java.lang.String"/>
+      <param name="count" type="java.lang.String"/>
+      <param name="startedBegin" type="java.lang.String"/>
+      <param name="startedEnd" type="java.lang.String"/>
+      <param name="finishBegin" type="java.lang.String"/>
+      <param name="finishEnd" type="java.lang.String"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+    </method>
+    <method name="getApp" return="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="getAppAttempts" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="getAppAttempt" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="getContainers" return="org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="getContainer" return="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="appId" type="java.lang.String"/>
+      <param name="appAttemptId" type="java.lang.String"/>
+      <param name="containerId" type="java.lang.String"/>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+    </method>
+    <method name="parseQueries" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="queries" type="java.util.Set"/>
+      <param name="isState" type="boolean"/>
+    </method>
+    <method name="parseApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <method name="parseApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="java.lang.String"/>
+    </method>
+    <method name="parseContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="containerId" type="java.lang.String"/>
+    </method>
+    <method name="validateIds"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+    </method>
+    <field name="appContext" type="org.apache.hadoop.yarn.server.api.ApplicationContext"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.WebServices -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp.dao">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <class name="AppAttemptInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppAttemptInfo" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptState" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="amContainerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protect

<TRUNCATED>
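
The jdiff entry above describes org.apache.hadoop.yarn.server.utils.LeveldbIterator as a wrapper that turns the raw RuntimeExceptions of a DBIterator into DBExceptions. As a rough illustration of how such an iterator is typically driven, here is a minimal sketch; the leveldbjni JniDBFactory usage, database path, and printed output are assumptions for illustration, not taken from the patch:

import java.io.File;
import java.util.Map;

import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;

public class LeveldbScanExample {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.createIfMissing(false);
    // Hypothetical path; a real NM/RM state store chooses its own location.
    DB db = JniDBFactory.factory.open(new File("/tmp/example-leveldb"), options);
    LeveldbIterator iter = new LeveldbIterator(db);
    try {
      iter.seekToFirst();
      while (iter.hasNext()) {
        Map.Entry<byte[], byte[]> entry = iter.next();
        System.out.println(new String(entry.getKey(), "UTF-8")
            + " -> " + entry.getValue().length + " bytes");
      }
    } finally {
      iter.close();  // per the doc above, close() reports failures as IOException
      db.close();
    }
  }
}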



[02/25] hadoop git commit: HDFS-7987. Allow files / directories to be moved (Ravi Prakash via aw)

Posted by ae...@apache.org.
HDFS-7987. Allow files / directories to be moved (Ravi Prakash via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d44f4745
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d44f4745
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d44f4745

Branch: refs/heads/HDFS-1312
Commit: d44f4745b4a186dd06dd6837a85ad90a237d7d97
Parents: 0b7b8a3
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jun 10 09:02:28 2016 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jun 10 09:02:28 2016 -0700

----------------------------------------------------------------------
 .../src/main/webapps/hdfs/explorer.html         |  9 +++
 .../src/main/webapps/hdfs/explorer.js           | 83 ++++++++++++++++----
 .../src/main/webapps/static/hadoop.css          |  7 ++
 3 files changed, 83 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d44f4745/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 51f72e5..ad8c374 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -179,6 +179,13 @@
           data-target="#modal-upload-file" title="Upload Files">
             <span class="glyphicon glyphicon-cloud-upload"></span>
         </button>
+        <button class="btn btn-default dropdown-toggle" type="button"
+          data-toggle="dropdown" title="Cut & Paste">
+        <span class="glyphicon glyphicon-list-alt"></span></button>
+        <ul class="dropdown-menu cut-paste">
+          <li><a id="explorer-cut">Cut</a></li>
+          <li><a id="explorer-paste">Paste</a></li>
+        </ul>
       </div>
     </div>
 
@@ -236,6 +243,7 @@
       <table class="table" id="table-explorer">
         <thead>
           <tr>
+            <th><input type="checkbox" id="file-selector-all"></th>
             <th title="Permissions">Permission</th>
             <th>Owner</th>
             <th>Group</th>
@@ -251,6 +259,7 @@
           {#FileStatus}
           <tr inode-path="{pathSuffix}" data-permission="{permission}"
             class="explorer-entry">
+            <td><input type="checkbox" class="file_selector"> </td>
             <td><span class="explorer-perm-links editable-click">
               {type|helper_to_directory}{permission|helper_to_permission}
               {aclBit|helper_to_acl_bit}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d44f4745/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 6fa5f19..1739db2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -310,22 +310,28 @@
           var absolute_file_path = append_path(current_directory, inode_name);
           delete_path(inode_name, absolute_file_path);
         });
-          
-          $('#table-explorer').dataTable( {
-              'lengthMenu': [ [25, 50, 100, -1], [25, 50, 100, "All"] ],
-              'columns': [
-                  {'searchable': false }, //Permissions
-                  null, //Owner
-                  null, //Group
-                  { 'searchable': false, 'render': func_size_render}, //Size
-                  { 'searchable': false, 'render': func_time_render}, //Last Modified
-                  { 'searchable': false }, //Replication
-                  null, //Block Size
-                  null, //Name
-                  { 'sortable' : false } //Trash
-              ],
-              "deferRender": true
-          });
+
+        $('#file-selector-all').click(function() {
+          $('.file_selector').prop('checked', $('#file-selector-all')[0].checked );
+        });
+
+        //This needs to be last because it repaints the table
+        $('#table-explorer').dataTable( {
+          'lengthMenu': [ [25, 50, 100, -1], [25, 50, 100, "All"] ],
+          'columns': [
+            { 'orderable' : false }, //select
+            {'searchable': false }, //Permissions
+            null, //Owner
+            null, //Group
+            { 'searchable': false, 'render': func_size_render}, //Size
+            { 'searchable': false, 'render': func_time_render}, //Last Modified
+            { 'searchable': false }, //Replication
+            null, //Block Size
+            null, //Name
+            { 'orderable' : false } //Trash
+          ],
+          "deferRender": true
+        });
       });
     }).error(network_error_handler(url));
   }
@@ -417,5 +423,50 @@
     }
   });
 
+  //Store the list of files which have been checked into session storage
+  function store_selected_files(current_directory) {
+    sessionStorage.setItem("source_directory", current_directory);
+    var selected_files = $("input:checked.file_selector");
+    var selected_file_names = new Array();
+    selected_files.each(function(index) {
+      selected_file_names[index] = $(this).closest('tr').attr('inode-path');
+    })
+    sessionStorage.setItem("selected_file_names", JSON.stringify(selected_file_names));
+    alert("Cut " + selected_file_names.length + " files/directories");
+  }
+
+  //Retrieve the list of files from session storage and rename them to the current
+  //directory
+  function paste_selected_files() {
+    var files = JSON.parse(sessionStorage.getItem("selected_file_names"));
+    var source_directory = sessionStorage.getItem("source_directory");
+    $.each(files, function(index, value) {
+      var url = "/webhdfs/v1"
+        + encode_path(append_path(source_directory, value))
+        + '?op=RENAME&destination='
+        + encode_path(append_path(current_directory, value));
+      $.ajax({
+        type: 'PUT',
+        url: url
+      }).done(function(data) {
+        if(index == files.length - 1) {
+          browse_directory(current_directory);
+        }
+      }).error(function(jqXHR, textStatus, errorThrown) {
+        show_err_msg("Couldn't move file " + value + ". " + errorThrown);
+      });
+
+    })
+  }
+
+  $('#explorer-cut').click(function() {
+    store_selected_files(current_directory);
+  });
+
+  $('#explorer-paste').click(function() {
+    paste_selected_files();
+  });
+
+
   init();
 })();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d44f4745/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
index 2ed5f29..5021fb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
@@ -285,4 +285,11 @@ header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
     margin-bottom: 0;
     font-weight: normal;
     cursor: pointer;
+}
+
+.cut-paste {
+  width: 75px;
+  min-width: 75px;
+  float: right;
+  left: 75px;
 }
\ No newline at end of file
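
The explorer changes above implement cut and paste by issuing PUT /webhdfs/v1/<src>?op=RENAME&destination=<dst> for each selected entry. The same move can be performed programmatically through the Hadoop FileSystem API over WebHDFS; a minimal sketch, with a hypothetical namenode address and paths:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsMoveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical namenode HTTP address; adjust host/port for a real cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);
    Path src = new Path("/user/alice/data.txt");
    Path dst = new Path("/user/alice/archive/data.txt");
    // rename() maps to the same WebHDFS RENAME operation the explorer UI uses.
    boolean moved = fs.rename(src, dst);
    System.out.println("moved: " + moved);
    fs.close();
  }
}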




[24/25] hadoop git commit: HDFS-10516. Fix bug when warming up EDEK cache of more than one encryption zone. Contributed by Xiao Chen.

Posted by ae...@apache.org.
HDFS-10516. Fix bug when warming up EDEK cache of more than one encryption zone. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/709a814f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/709a814f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/709a814f

Branch: refs/heads/HDFS-1312
Commit: 709a814fe0153e86a37806796ea27c8252d9c6d1
Parents: 0accc33
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jun 13 11:50:19 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jun 13 11:50:19 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java    | 2 +-
 .../hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java    | 3 +++
 .../org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java    | 7 ++++++-
 3 files changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/709a814f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 41dbb59..511c616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -416,7 +416,7 @@ public class EncryptionZoneManager {
     int index = 0;
     for (Map.Entry<Long, EncryptionZoneInt> entry : encryptionZones
         .entrySet()) {
-      ret[index] = entry.getValue().getKeyName();
+      ret[index++] = entry.getValue().getKeyName();
     }
     return ret;
   }
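
The one-character change above fixes the loop that collects key names: without the post-increment, every key name is written into slot 0 of the pre-sized array and the remaining slots stay null, which is why EDEK cache warm-up broke once there was more than one encryption zone. A standalone illustration of the difference (hypothetical key names, not code from the patch):

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

public class IndexIncrementDemo {
  public static void main(String[] args) {
    Map<Long, String> zones = new LinkedHashMap<>();
    zones.put(1L, "key-zone1");
    zones.put(2L, "key-zone2");

    String[] broken = new String[zones.size()];
    String[] fixed = new String[zones.size()];
    int i = 0;
    int j = 0;
    for (String keyName : zones.values()) {
      broken[i] = keyName;   // index never advances -> [key-zone2, null]
      fixed[j++] = keyName;  // post-increment       -> [key-zone1, key-zone2]
    }
    System.out.println(Arrays.toString(broken));
    System.out.println(Arrays.toString(fixed));
  }
}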

http://git-wip-us.apache.org/repos/asf/hadoop/blob/709a814f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 2997179..2319741 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -370,6 +370,9 @@ final class FSDirEncryptionZoneOp {
           } else {
             NameNode.LOG.debug("Failed to warm up EDEKs.", ioe);
           }
+        } catch (Exception e) {
+          NameNode.LOG.error("Cannot warm up EDEKs.", e);
+          throw e;
         }
         try {
           Thread.sleep(retryInterval);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/709a814f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index b29a108..959e724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -100,9 +100,14 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
 
   @Test(timeout = 120000)
   public void testWarmupEDEKCacheOnStartup() throws Exception {
-    final Path zonePath = new Path("/TestEncryptionZone");
+    Path zonePath = new Path("/TestEncryptionZone");
     fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
     dfsAdmin.createEncryptionZone(zonePath, TEST_KEY, NO_TRASH);
+    final String anotherKey = "k2";
+    zonePath = new Path("/TestEncryptionZone2");
+    DFSTestUtil.createKey(anotherKey, cluster, conf);
+    fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
+    dfsAdmin.createEncryptionZone(zonePath, anotherKey, NO_TRASH);
 
     @SuppressWarnings("unchecked")
     KMSClientProvider spy = (KMSClientProvider) Whitebox




[11/25] hadoop git commit: YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/03fc6b1b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
new file mode 100644
index 0000000..ff01b26
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
@@ -0,0 +1,13692 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu May 12 17:47:18 PDT 2016 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-yarn-api 2.7.2"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/classes:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2
 .jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-
 2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repos
 itory/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core
 -asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/a
 pi-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7
 .0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/site/jdiff/xml -apiname hadoop-yarn-api 2.7.2 -->
+<package name="org.apache.hadoop.yarn.api">
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <interface name="ApplicationClientProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"/>
+    <method name="getNewApplication" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to obtain a new {@link ApplicationId} for
+ submitting new applications.</p>
+
+ <p>The <code>ResourceManager</code> responds with a new, monotonically
+ increasing, {@link ApplicationId} which is used by the client to submit
+ a new application.</p>
+
+ <p>The <code>ResourceManager</code> also responds with details such
+ as maximum resource capabilities in the cluster as specified in
+ {@link GetNewApplicationResponse}.</p>
+
+ @param request request to get a new <code>ApplicationId</code>
+ @return response containing the new <code>ApplicationId</code> to be used
+ to submit an application
+ @throws YarnException
+ @throws IOException
+ @see #submitApplication(SubmitApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to submit a new application to the
+ <code>ResourceManager.</code></p>
+
+ <p>The client is required to provide details such as queue,
+ {@link Resource} required to run the <code>ApplicationMaster</code>,
+ the equivalent of {@link ContainerLaunchContext} for launching
+ the <code>ApplicationMaster</code> etc. via the
+ {@link SubmitApplicationRequest}.</p>
+
+ <p>Currently the <code>ResourceManager</code> sends an immediate (empty)
+ {@link SubmitApplicationResponse} on accepting the submission and throws
+ an exception if it rejects the submission. However, this call needs to be
+ followed by {@link #getApplicationReport(GetApplicationReportRequest)}
+ to make sure that the application gets properly submitted - obtaining a
+ {@link SubmitApplicationResponse} from ResourceManager doesn't guarantee
+ that RM 'remembers' this application beyond failover or restart. If RM
+ failover or RM restart happens before ResourceManager saves the
+ application's state successfully, the subsequent
+ {@link #getApplicationReport(GetApplicationReportRequest)} will throw
+ a {@link ApplicationNotFoundException}. The Clients need to re-submit
+ the application with the same {@link ApplicationSubmissionContext} when
+ it encounters the {@link ApplicationNotFoundException} on the
+ {@link #getApplicationReport(GetApplicationReportRequest)} call.</p>
+
+ <p>During the submission process, it checks whether the application
+ already exists. If the application exists, it will simply return
+ SubmitApplicationResponse</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access to
+ queues etc. before accepting the application submission.</p>
+
+ @param request request to submit a new application
+ @return (empty) response on accepting the submission
+ @throws YarnException
+ @throws IOException
+ @see #getNewApplication(GetNewApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="forceKillApplication" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to request the
+ <code>ResourceManager</code> to abort submitted application.</p>
+
+ <p>The client, via {@link KillApplicationRequest} provides the
+ {@link ApplicationId} of the application to be aborted.</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access to the
+ application, queue etc. before terminating the application.</p>
+
+ <p>Currently, the <code>ResourceManager</code> returns an empty response
+ on success and throws an exception on rejecting the request.</p>
+
+ @param request request to abort a submitted application
+ @return <code>ResourceManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+      </doc>
+    </method>
+    <method name="getClusterMetrics" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get metrics about the cluster from
+ the <code>ResourceManager</code>.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetClusterMetricsResponse} which includes the
+ {@link YarnClusterMetrics} with details such as number of current
+ nodes in the cluster.</p>
+
+ @param request request for cluster metrics
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a report of all nodes
+ in the cluster from the <code>ResourceManager</code>.</p>
+
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetClusterNodesResponse} which includes the
+ {@link NodeReport} for all the nodes in the cluster.</p>
+
+ @param request request for report on all nodes
+ @return report on all nodes
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
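A rough illustration of the two cluster query calls documented above (getClusterMetrics and getClusterNodes), assuming an ApplicationClientProtocol proxy named "client"; names are illustrative only:

    import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.*;
    import org.apache.hadoop.yarn.api.records.*;

    public class ClusterQuerySketch {
      static void printCluster(ApplicationClientProtocol client) throws Exception {
        // Cluster-wide metrics, e.g. the number of NodeManagers.
        YarnClusterMetrics metrics = client
            .getClusterMetrics(GetClusterMetricsRequest.newInstance())
            .getClusterMetrics();
        System.out.println("NodeManagers: " + metrics.getNumNodeManagers());

        // One NodeReport per node in the cluster.
        for (NodeReport node : client
            .getClusterNodes(GetClusterNodesRequest.newInstance())
            .getNodeReports()) {
          System.out.println(node.getNodeId() + " capability=" + node.getCapability());
        }
      }
    }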
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queues</em>
+ from the <code>ResourceManager</code>.</p>
+
+ <p>The client, via {@link GetQueueInfoRequest}, can ask for details such
+ as used/total resources, child queues, running applications etc.</p>
+
+ <p> In secure mode, the <code>ResourceManager</code> verifies access before
+ providing the information.</p>
+
+ @param request request to get queue information
+ @return queue information
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueUserAcls" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queue
+ acls</em> for <em>current user</em> from the <code>ResourceManager</code>.
+ </p>
+
+ <p>The <code>ResourceManager</code> responds with queue acls for all
+ existing queues.</p>
+
+ @param request request to get queue acls for <em>current user</em>
+ @return queue acls for <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues" return="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move an application to a new queue.
+
+ @param request the application ID and the target queue
+ @return an empty response
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and concurrency needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies
+ that the user's requests can be fulfilled, and that they respect a sharing
+ policy (e.g., {@code CapacityOverTimePolicy}). Once it has positively
+ determined that the ReservationSubmissionRequest is satisfiable, the
+ {@code ResourceManager} answers with a
+ {@link ReservationSubmissionResponse} that includes a non-null
+ {@link ReservationId}. Upon failure to find a valid allocation, the
+ response is an exception stating the reason.
+
+ On application submission the client can use this {@link ReservationId} to
+ obtain access to the reserved resources.
+ </p>
+
+ <p>
+ The system guarantees that during the time-range specified by the user, the
+ reservationID will correspond to a valid reservation. The amount of
+ capacity dedicated to such a queue can vary over time, depending on the
+ allocation that has been determined, but it is guaranteed to satisfy all
+ the constraints expressed by the user in the
+ {@link ReservationSubmissionRequest}.
+ </p>
+
+ @param request the request to submit a new Reservation
+ @return response the {@link ReservationId} on accepting the submission
+ @throws YarnException if the request is invalid or reservation cannot be
+           created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation asks for its allocation to be changed.
+ </p>
+
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationUpdateRequest}. Upon success the previous allocation is
+ substituted by the new one, and on failure (i.e., if the system cannot find
+ a valid allocation for the updated request), the previous allocation
+ remains valid.
+
+ The {@link ReservationId} is not changed, and applications currently
+ running within this reservation will automatically receive the resources
+ based on the new allocation.
+ </p>
+
+ @param request to update an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+
+ Upon deletion of a reservation, applications running with this reservation
+ are automatically downgraded to normal jobs running without any dedicated
+ reservation.
+ </p>
+
+ @param request to remove an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeToLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node-to-labels mappings in the existing cluster
+ </p>
+
+ @param request
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get labels-to-nodes mappings
+ in the existing cluster
+ </p>
+
+ @param request
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node labels in the cluster
+ </p>
+
+ @param request to get node labels collection of this cluster
+ @return node labels collection of this cluster
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between clients and the <code>ResourceManager</code>
+ to submit/abort jobs and to get information on applications, cluster metrics,
+ nodes, queues and ACLs.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <interface name="ApplicationConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="APP_SUBMIT_TIME_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
+ only]]>
+      </doc>
+    </field>
+    <field name="CONTAINER_TOKEN_FILE_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The cache file into which container token is written]]>
+      </doc>
+    </field>
+    <field name="APPLICATION_WEB_PROXY_BASE_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for APPLICATION_WEB_PROXY_BASE. Set in the
+ ApplicationMaster's environment only. This specifies the base that all
+ non-relative web URLs in the ApplicationMaster's web UI should have.]]>
+      </doc>
+    </field>
+    <field name="LOG_DIR_EXPANSION_VAR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The temporary environmental variable for container log directory. This
+ should be replaced by real container log directory on container launch.]]>
+      </doc>
+    </field>
+    <field name="CLASS_PATH_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This constant is used to construct the class path; it will be replaced with
+ the real class path separator (':' for Linux and ';' for Windows) by the
+ NodeManager on container launch. Users have to use this constant to construct
+ the class path if they want cross-platform behaviour, i.e. submitting an
+ application from a Windows client to a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_LEFT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This constant and PARAMETER_EXPANSION_RIGHT are used to delimit parameter
+ expansion; they will be replaced with the real parameter expansion marker
+ ('%' for Windows and '$' for Linux) by the NodeManager on container launch.
+ For example, {{VAR}} will be replaced as $VAR on Linux and %VAR% on Windows.
+ Users have to use these constants to construct the class path if they want
+ cross-platform behaviour, i.e. submitting an application from a Windows
+ client to a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_RIGHT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Users have to use this constant, together with PARAMETER_EXPANSION_LEFT, to
+ construct the class path if they want cross-platform behaviour, i.e.
+ submitting an application from a Windows client to a Linux/Unix server or
+ vice versa.]]>
+      </doc>
+    </field>
+    <field name="STDERR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STDOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_APP_ATTEMPTS_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for MAX_APP_ATTEMPTS. Set in AppMaster environment
+ only]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This is the API for applications, comprising the constants that YARN sets
+ up for applications and containers.
+
+ TODO: Investigate the semantics and security of each cross-boundary ref.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <!-- start class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
+  <class name="ApplicationConstants.Environment" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="key" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="$" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Expand the environment variable based on client OS environment variable
+ expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
+ <p>
+ Note: Use $$() method for cross-platform practice i.e. submit an
+ application from a Windows client to a Linux/Unix server or vice versa.
+ </p>]]>
+      </doc>
+    </method>
+    <method name="$$" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Expand the environment variable in platform-agnostic syntax. The
+ parameter expansion marker "{{VAR}}" will be replaced with real parameter
+ expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
+ container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
+ and %VAR% on Windows.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Environment for Applications.
+
+ Some of the environment variables for applications are <em>final</em>
+ i.e. they cannot be modified by the applications.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
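A small sketch of how the cross-platform expansion ($$()) and CLASS_PATH_SEPARATOR constants described above are typically combined when building a container's CLASSPATH environment entry. The helper class and the chosen path entries are assumptions for illustration:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.ApplicationConstants;
    import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

    public class ClasspathSketch {
      // Build a cross-platform CLASSPATH value for a container environment map.
      static Map<String, String> containerEnv() {
        StringBuilder classpath = new StringBuilder(Environment.CLASSPATH.$$());
        classpath.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
        classpath.append(ApplicationConstants.CLASS_PATH_SEPARATOR)
                 .append(Environment.HADOOP_CONF_DIR.$$());

        Map<String, String> env = new HashMap<>();
        env.put(Environment.CLASSPATH.name(), classpath.toString());
        return env;
      }
    }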
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <interface name="ApplicationHistoryProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"/>
+    <doc>
+    <![CDATA[<p>
+ The protocol between clients and the <code>ApplicationHistoryServer</code> to
+ get the information of completed applications etc.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
+  <interface name="ApplicationMasterProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by a new <code>ApplicationMaster</code> to register with
+ the <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> needs to provide details such as RPC
+ Port, HTTP tracking url etc. as specified in
+ {@link RegisterApplicationMasterRequest}.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with critical details such as
+ maximum resource capabilities in the cluster as specified in
+ {@link RegisterApplicationMasterResponse}.
+ </p>
+
+ @param request
+          registration request
+ @return registration response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+           The exception is thrown when an ApplicationMaster tries to
+           register more than once.
+ @see RegisterApplicationMasterRequest
+ @see RegisterApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by an <code>ApplicationMaster</code> to notify the
+ <code>ResourceManager</code> about its completion (success or failure).</p>
+
+ <p>The <code>ApplicationMaster</code> has to provide details such as
+ final state, diagnostics (in case of failures) etc. as specified in
+ {@link FinishApplicationMasterRequest}.</p>
+
+ <p>The <code>ResourceManager</code> responds with
+ {@link FinishApplicationMasterResponse}.</p>
+
+ @param request completion request
+ @return completion response
+ @throws YarnException
+ @throws IOException
+ @see FinishApplicationMasterRequest
+ @see FinishApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The main interface between an <code>ApplicationMaster</code> and the
+ <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> uses this interface to provide a list of
+ {@link ResourceRequest} and returns unused {@link Container} allocated to
+ it via {@link AllocateRequest}. Optionally, the
+ <code>ApplicationMaster</code> can also <em>blacklist</em> resources which
+ it doesn't want to use.
+ </p>
+
+ <p>
+ This also doubles up as a <em>heartbeat</em> to let the
+ <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
+ is alive. Thus, applications should periodically make this call to be kept
+ alive. The frequency depends on
+ {@link YarnConfiguration#RM_AM_EXPIRY_INTERVAL_MS} which defaults to
+ {@link YarnConfiguration#DEFAULT_RM_AM_EXPIRY_INTERVAL_MS}.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with list of allocated
+ {@link Container}, status of completed containers and headroom information
+ for the application.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> can use the available headroom
+ (resources) to decide how to utilize allocated resources and make informed
+ decisions about future resource requests.
+ </p>
+
+ @param request
+          allocation request
+ @return allocation response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+           This exception is thrown when an ApplicationMaster calls allocate
+           without registering first.
+ @throws InvalidResourceBlacklistRequestException
+           This exception is thrown when an application provides an invalid
+           specification for blacklist of resources.
+ @throws InvalidResourceRequestException
+           This exception is thrown when a {@link ResourceRequest} is out of
+           the range of the configured lower and upper limits on the
+           resources.
+ @see AllocateRequest
+ @see AllocateResponse]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between a live instance of <code>ApplicationMaster</code>
+ and the <code>ResourceManager</code>.</p>
+
+ <p>This is used by the <code>ApplicationMaster</code> to register/unregister
+ and to request and obtain resources in the cluster from the
+ <code>ResourceManager</code>.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
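The typical register / allocate / finish flow described above, as a minimal sketch. The proxy name, host string, progress value and empty ask/release lists are placeholders, not taken from this file:

    import java.util.Collections;
    import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.*;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;

    public class AmLifecycleSketch {
      // Assumes "scheduler" is an ApplicationMasterProtocol proxy bound to the RM.
      static void run(ApplicationMasterProtocol scheduler) throws Exception {
        // 1. Register; the response carries e.g. the maximum container capability.
        scheduler.registerApplicationMaster(
            RegisterApplicationMasterRequest.newInstance("am-host", 0, ""));

        // 2. Heartbeat/allocate loop: send asks and releases, receive containers.
        int responseId = 0;
        AllocateRequest ask = AllocateRequest.newInstance(
            responseId, 0.0f,
            Collections.<ResourceRequest>emptyList(),   // resource asks
            Collections.<ContainerId>emptyList(),       // containers to release
            null);                                      // no blacklist update
        AllocateResponse reply = scheduler.allocate(ask);
        // "reply" carries allocated containers, completed statuses, NMTokens, ...

        // 3. Unregister once the application is done.
        scheduler.finishApplicationMaster(FinishApplicationMasterRequest
            .newInstance(FinalApplicationStatus.SUCCEEDED, "", ""));
      }
    }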
+  <!-- start interface org.apache.hadoop.yarn.api.ClientSCMProtocol -->
+  <interface name="ClientSCMProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="use" return="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to claim a resource with the
+ <code>SharedCacheManager.</code> The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application will be
+ using the resource.
+ </p>
+
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>Path</code>
+ to the resource in the shared cache is returned. If the resource does not
+ exist, the response is empty.
+ </p>
+
+ @param request request to claim a resource in the shared cache
+ @return response indicating if the resource is already in the cache
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="release" return="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to release a resource with the
+ <code>SharedCacheManager.</code> This method is called once an application
+ is no longer using a claimed resource in the shared cache. The client uses
+ a checksum to identify the resource and an {@link ApplicationId} to
+ identify which application is releasing the resource.
+ </p>
+
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+
+ <p>
+ Currently the <code>SharedCacheManager</code> sends an empty response.
+ </p>
+
+ @param request request to release a resource in the shared cache
+ @return (empty) response on releasing the resource
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The protocol between clients and the <code>SharedCacheManager</code> to claim
+ and release resources in the shared cache.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ClientSCMProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ClientSCMProtocolPB -->
+  <interface name="ClientSCMProtocolPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.proto.ClientSCMProtocol.ClientSCMProtocolService.BlockingInterface"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ClientSCMProtocolPB -->
+  <!-- start interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+  <interface name="ContainerManagementProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="startContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> provides a list of
+ {@link StartContainerRequest}s to a <code>NodeManager</code> to
+ <em>start</em> {@link Container}s allocated to it using this interface.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> has to provide details such as allocated
+ resource capability, security tokens (if enabled), command to be executed
+ to start the container, environment for the process, necessary
+ binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
+ the {@link StartContainerRequest}.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StartContainersResponse} which includes a list of
+ successfully launched {@link Container}s, a
+ containerId-to-exception map for each failed {@link StartContainerRequest} in
+ which the exception indicates the per-container error, and an
+ allServicesMetaData map between the names of auxiliary services and their
+ corresponding meta-data. Note: non-container-specific exceptions will
+ still be thrown by the API method itself.
+ </p>
+ <p>
+ The <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the to-be-launched or launched containers.
+ </p>
+
+ @param request
+          request to start a list of containers
+ @return response including containerIds of all successfully launched
+         containers, a containerId-to-exception map for failed requests and
+         an allServicesMetaData map.
+ @throws YarnException
+ @throws IOException
+ @throws NMNotYetReadyException
+           This exception is thrown when NM starts from scratch but has not
+           yet connected with RM.]]>
+      </doc>
+    </method>
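A hedged sketch of the start-containers call documented above. The proxy and container variable names are assumptions, the empty maps stand in for real local resources and environment, and the "sleep 60" command is purely illustrative:

    import java.util.Collections;
    import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
    import org.apache.hadoop.yarn.api.protocolrecords.*;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.LocalResource;

    public class StartContainerSketch {
      // Assumes "nm" is a ContainerManagementProtocol proxy for the target node and
      // "allocated" is a Container handed out by the RM in an AllocateResponse.
      static void launch(ContainerManagementProtocol nm, Container allocated)
          throws Exception {
        // Commands, environment, local resources etc. go into the launch context.
        ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
            Collections.<String, LocalResource>emptyMap(),  // local resources
            Collections.<String, String>emptyMap(),         // environment
            Collections.singletonList("sleep 60"),          // commands
            null, null, null);                              // service data, tokens, ACLs

        StartContainerRequest start =
            StartContainerRequest.newInstance(ctx, allocated.getContainerToken());
        StartContainersResponse response =
            nm.startContainers(StartContainersRequest.newInstance(
                Collections.singletonList(start)));
        // response.getSuccessfullyStartedContainers() lists the ids of launched
        // containers; response.getFailedRequests() maps failed ContainerIds to
        // their exceptions.
      }
    }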
+    <method name="stopContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> requests a <code>NodeManager</code> to
+ <em>stop</em> a list of {@link Container}s allocated to it using this
+ interface.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> sends a {@link StopContainersRequest}
+ which includes the {@link ContainerId}s of the containers to be stopped.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StopContainersResponse} which includes a list of {@link ContainerId}
+ s of successfully stopped containers, a containerId-to-exception map for
+ each failed request in which the exception indicates the per-container
+ error. Note: non-container-specific exceptions will still be thrown by
+ the API method itself. <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the containers.
+ </p>
+
+ @param request
+          request to stop a list of containers
+ @return response which includes a list of containerIds of successfully
+         stopped containers, a containerId-to-exception map for failed
+         requests.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatuses" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The API used by the <code>ApplicationMaster</code> to request for current
+ statuses of <code>Container</code>s from the <code>NodeManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> sends a
+ {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
+ of all containers whose statuses are needed.
+ </p>
+
+ <p>
+ The <code>NodeManager</code> responds with
+ {@link GetContainerStatusesResponse} which includes a list of
+ {@link ContainerStatus} of the successfully queried containers and a
+ containerId-to-exception map for each failed request in which the exception
+ indicates the per-container error. Note: non-container-specific
+ exceptions will still be thrown by the API method itself.
+ </p>
+
+ @param request
+          request to get <code>ContainerStatus</code>es of containers with
+          the specified <code>ContainerId</code>s
+ @return response containing the list of <code>ContainerStatus</code> of the
+         successfully queried containers and a containerId-to-exception map
+         for failed requests.
+
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between an <code>ApplicationMaster</code> and a
+ <code>NodeManager</code> to start/stop containers and to get status
+ of running containers.</p>
+
+ <p>If security is enabled the <code>NodeManager</code> verifies that the
+ <code>ApplicationMaster</code> has truly been allocated the container
+ by the <code>ResourceManager</code> and also verifies all interactions such
+ as stopping the container or obtaining status information for the container.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+</package>
+<package name="org.apache.hadoop.yarn.api.protocolrecords">
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <class name="AllocateRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <param name="increaseRequests" type="java.util.List"/>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>response id</em> used to track duplicate responses.
+ @return <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="setResponseId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+      <doc>
+      <![CDATA[Set the <em>response id</em> used to track duplicate responses.
+ @param id <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>current progress</em> of application.
+ @return <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="setProgress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progress" type="float"/>
+      <doc>
+      <![CDATA[Set the <em>current progress</em> of application
+ @param progress <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="getAskList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ResourceRequest</code> to update the
+ <code>ResourceManager</code> about the application's resource requirements.
+ @return the list of <code>ResourceRequest</code>
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="setAskList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set list of <code>ResourceRequest</code> to update the
+ <code>ResourceManager</code> about the application's resource requirements.
+ @param resourceRequests list of <code>ResourceRequest</code> to update the
+                        <code>ResourceManager</code> about the application's
+                        resource requirements
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="getReleaseList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ContainerId</code> of containers being
+ released by the <code>ApplicationMaster</code>.
+ @return list of <code>ContainerId</code> of containers being
+         released by the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setReleaseList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="releaseContainers" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of <code>ContainerId</code> of containers being
+ released by the <code>ApplicationMaster</code>
+ @param releaseContainers list of <code>ContainerId</code> of
+                          containers being released by the
+                          <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getResourceBlacklistRequest" return="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ResourceBlacklistRequest</code> being sent by the
+ <code>ApplicationMaster</code>.
+ @return the <code>ResourceBlacklistRequest</code> being sent by the
+         <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="setResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <doc>
+      <![CDATA[Set the <code>ResourceBlacklistRequest</code> to inform the
+ <code>ResourceManager</code> about the blacklist additions and removals
+ per the <code>ApplicationMaster</code>.
+
+ @param resourceBlacklistRequest the <code>ResourceBlacklistRequest</code>
+                         to inform the <code>ResourceManager</code> about
+                         the blacklist additions and removals
+                         per the <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="getIncreaseRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerResourceIncreaseRequest</code> being sent by the
+ <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setIncreaseRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="increaseRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerResourceIncreaseRequest</code> to inform the
+ <code>ResourceManager</code> about containers whose resources need to be
+ increased.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The core request sent by the <code>ApplicationMaster</code> to the
+ <code>ResourceManager</code> to obtain resources in the cluster.</p>
+
+ <p>The request includes:
+ <ul>
+   <li>A response id to track duplicate responses.</li>
+   <li>Progress information.</li>
+   <li>
+     A list of {@link ResourceRequest} to inform the
+     <code>ResourceManager</code> about the application's
+     resource requirements.
+   </li>
+   <li>
+     A list of unused {@link Container} which are being returned.
+   </li>
+ </ul>
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
+  <class name="AllocateResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+      <param name="increasedContainers" type="java.util.List"/>
+      <param name="decreasedContainers" type="java.util.List"/>
+    </method>
+    <method name="getAMCommand" return="org.apache.hadoop.yarn.api.records.AMCommand"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If the <code>ResourceManager</code> needs the
+ <code>ApplicationMaster</code> to take some action then it will send an
+ AMCommand to the <code>ApplicationMaster</code>. See <code>AMCommand</code>
+ for details on commands and actions for them.
+ @return <code>AMCommand</code> if the <code>ApplicationMaster</code> should
+         take action, <code>null</code> otherwise
+ @see AMCommand]]>
+      </doc>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last response id</em>.
+ @return <em>last response id</em>]]>
+      </doc>
+    </method>
+    <method name="getAllocatedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>newly allocated</em> <code>Container</code> by the
+ <code>ResourceManager</code>.
+ @return list of <em>newly allocated</em> <code>Container</code>]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>available headroom</em> for resources in the cluster for the
+ application.
+ @return limit of available headroom for resources in the cluster for the
+ application]]>
+      </doc>
+    </method>
+    <method name="getCompletedContainersStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>completed containers' statuses</em>.
+ @return the list of <em>completed containers' statuses</em>]]>
+      </doc>
+    </method>
+    <method name="getUpdatedNodes" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>updated <code>NodeReport</code>s</em>. Updates could
+ be changes in health, availability etc of the nodes.
+ @return The delta of updated nodes since the last response]]>
+      </doc>
+    </method>
+    <method name="getNumClusterNodes" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of hosts available on the cluster.
+ @return the available host count.]]>
+      </doc>
+    </method>
+    <method name="getPreemptionMessage" return="org.apache.hadoop.yarn.api.records.PreemptionMessage"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the description of containers owned by the AM, but requested back by
+ the cluster. Note that the RM may have an inconsistent view of the
+ resources owned by the AM. These messages are advisory, and the AM may
+ elect to ignore them.
+ <p>
+ The message is a snapshot of the resources the RM wants back from the AM.
+ While demand persists, the RM will repeat its request; applications should
+ not interpret each message as a request for <em>additional</em>
+ resources on top of previous messages. Resources requested consistently
+ over some duration may be forcibly killed by the RM.
+
+ @return A specification of the resources to reclaim from this AM.]]>
+      </doc>
+    </method>
+    <method name="getNMTokens" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of NMTokens required for communicating with the NM. New NMTokens
+ are issued only if
+ <p>
+ 1) AM is receiving first container on underlying NodeManager.<br>
+ OR<br>
+ 2) NMToken master key rolled over in ResourceManager and AM is getting new
+ container on the same underlying NodeManager.
+ <p>
+ AM will receive one NMToken per NM irrespective of the number of containers
+ issued on the same NM. The AM is expected to store these tokens until it is
+ issued a new token for the same NM.]]>
+      </doc>
+    </method>
+    <method name="getIncreasedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of containers newly increased by the <code>ResourceManager</code>.]]>
+      </doc>
+    </method>
+    <method name="getDecreasedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of containers newly decreased by the <code>NodeManager</code>.]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The AMRMToken that belongs to this attempt.
+
+ @return The AMRMToken that belongs to this attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to the
+ <code>ApplicationMaster</code> during resource negotiation.
+ <p>
+ The response includes:
+ <ul>
+   <li>Response ID to track duplicate responses.</li>
+   <li>
+     An AMCommand sent by ResourceManager to let the
+     {@code ApplicationMaster} take some actions (resync, shutdown etc.).
+   </li>
+   <li>A list of newly allocated {@link Container}.</li>
+   <li>A list of completed {@link Container}s' statuses.</li>
+   <li>
+     The available headroom for resources in the cluster for the
+     application.
+   </li>
+   <li>A list of nodes whose status has been updated.</li>
+   <li>The number of available nodes in a cluster.</li>
+   <li>A description of resources requested back by the cluster</li>
+   <li>AMRMToken, if AMRMToken has been rolled over</li>
+ </ul>
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
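To make the fields above concrete, a small sketch of how an ApplicationMaster typically consumes an AllocateResponse; the handler class and the bookkeeping hinted at in the comments are assumptions, not part of this file:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.ContainerStatus;
    import org.apache.hadoop.yarn.api.records.NMToken;

    public class AllocateResponseSketch {
      // Typical handling of one allocate() reply: remember NMTokens, launch newly
      // allocated containers, account for completed ones, honour AM commands.
      static void handle(AllocateResponse response) {
        for (NMToken token : response.getNMTokens()) {
          // Store per-NodeManager tokens; they are needed to talk to that NM.
        }
        for (Container container : response.getAllocatedContainers()) {
          // Hand the container to a launcher (see ContainerManagementProtocol).
        }
        for (ContainerStatus status : response.getCompletedContainersStatuses()) {
          // Update bookkeeping with exit status / diagnostics.
        }
        if (response.getAMCommand() != null) {
          // Resync or shutdown: the AM must re-register or shut down respectively.
        }
      }
    }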
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <class name="ApplicationsRequestScope" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration that controls the scope of applications fetched]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest -->
+  <class name="FinishApplicationMasterRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FinishApplicationMasterRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="url" type="java.lang.String"/>
+    </method>
+    <method name="getFinalApplicationStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>final state</em> of the <code>ApplicationMaster</code>.
+ @return <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setFinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalState" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <doc>
+      <![CDATA[Set the <em>final state</em> of the <code>ApplicationMaster</code>
+ @param finalState <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>diagnostic information</em> on application failure.
+ @return <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="setDiagnostics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnostics" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set <em>diagnostic information</em> on application failure.
+ @param diagnostics <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking URL</em> for the <code>ApplicationMaster</code>.
+ If this URL contains a scheme, that scheme will be used by the ResourceManager
+ web-application proxy; otherwise it will default to http.
+ @return <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>final tracking URL</em> for the <code>ApplicationMaster</code>.
+ This is the web-URL to which ResourceManager or web-application proxy will
+ redirect client/users once the application is finished and the
+ <code>ApplicationMaster</code> is gone.
+ <p>
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+ </p>
+ <p>
+ Besides a real URL, empty, null, and "N/A" strings are all valid. If a URL
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+ </p>
+
+ @param url
+          <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The finalization request sent by the {@code ApplicationMaster} to
+ inform the {@code ResourceManager} about its completion.
+ <p>
+ The final request includes details such as:
+ <ul>
+   <li>Final state of the {@code ApplicationMaster}</li>
+   <li>
+     Diagnostic information in case of failure of the
+     {@code ApplicationMaster}
+   </li>
+   <li>Tracking URL</li>
+ </ul>
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse -->
+  <class name="FinishApplicationMasterResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FinishApplicationMasterResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsUnregistered" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the flag which indicates that the application has successfully
+ unregistered with the RM and the application can safely stop.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to an
+ <code>ApplicationMaster</code> on its completion.
+ <p>
+ The response includes:
+ <ul>
+ <li>A flag which indicates that the application has successfully unregistered
+ with the RM and the application can safely stop.</li>
+ </ul>
+ <p>
+ Note: The flag indicates whether the application has successfully
+ unregistered and is safe to stop. The application may stop after the flag is
+ true. If the application stops before the flag is true then the RM may retry
+ the application.
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse -->
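
Tying the two records above together, a minimal, hedged sketch of AM unregistration: build a
FinishApplicationMasterRequest, then keep calling finishApplicationMaster until
getIsUnregistered() turns true, precisely because stopping earlier may cause the RM to retry
the application. The amProtocol proxy is assumed to exist already; imports and error handling
are omitted.

    // Unregister and wait for the RM to confirm before the AM process exits.
    static void unregister(ApplicationMasterProtocol amProtocol)
        throws YarnException, IOException, InterruptedException {
      FinishApplicationMasterRequest request =
          FinishApplicationMasterRequest.newInstance(
              FinalApplicationStatus.SUCCEEDED,
              "",      // no diagnostics on success
              "N/A");  // no final tracking URL in this sketch
      while (!amProtocol.finishApplicationMaster(request).getIsUnregistered()) {
        Thread.sleep(100L);  // retry until the unregistration is confirmed
      }
    }
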
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest -->
+  <class name="GetApplicationAttemptReportRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptReportRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of an application attempt.
+
+ @return <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationAttemptId</code> of an application attempt
+
+ @param applicationAttemptId
+          <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request sent by a client to the <code>ResourceManager</code> to get an
+ {@link ApplicationAttemptReport} for an application attempt.
+ </p>
+
+ <p>
+ The request should include the {@link ApplicationAttemptId} of the
+ application attempt.
+ </p>
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse -->
+  <class name="GetApplicationAttemptReportResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptReportResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ApplicationAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptReport</code> for the application attempt.
+
+ @return <code>ApplicationAttemptReport</code> for the application attempt]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationAttemptReport</code> for the application attempt.
+
+ @param applicationAttemptReport
+          <code>ApplicationAttemptReport</code> for the application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ an application attempt report.
+ </p>
+
+ <p>
+ The response includes an {@link ApplicationAttemptReport} which has the
+ details about the particular application attempt
+ </p>
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse -->
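
For context, a small sketch of how a client might use the two records above to fetch a single
attempt report. The historyProtocol handle is assumed to be an already created
ApplicationHistoryProtocol (or ApplicationClientProtocol) proxy; imports are omitted.

    // Fetch the ApplicationAttemptReport for one application attempt.
    static ApplicationAttemptReport fetchAttemptReport(
        ApplicationHistoryProtocol historyProtocol,
        ApplicationAttemptId attemptId) throws YarnException, IOException {
      GetApplicationAttemptReportRequest request =
          GetApplicationAttemptReportRequest.newInstance(attemptId);
      return historyProtocol.getApplicationAttemptReport(request)
          .getApplicationAttemptReport();
    }
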
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest -->
+  <class name="GetApplicationAttemptsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of an application
+
+ @return <code>ApplicationId</code> of an application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of an application
+
+ @param applicationId
+          <code>ApplicationId</code> of an application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get a list of application attempt reports of an
+ application from the <code>ResourceManager</code>.
+ </p>
+
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse -->
+  <class name="GetApplicationAttemptsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttempts" type="java.util.List"/>
+    </method>
+    <method name="getApplicationAttemptList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of <code>ApplicationAttemptReport</code>s of an application.
+
+ @return a list of <code>ApplicationAttemptReport</code>s of an application]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttempts" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set a list of <code>ApplicationAttemptReport</code>s of an application.
+
+ @param applicationAttempts
+          a list of <code>ApplicationAttemptReport</code>s of an application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ a list of {@link ApplicationAttemptReport} for application attempts.
+ </p>
+
+ <p>
+ The <code>ApplicationAttemptReport</code> for each application includes the
+ details of an application attempt.
+ </p>
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse -->
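
Along the same lines, a hedged sketch of listing every attempt of an application with the
request/response pair above; historyProtocol is the same assumed proxy as in the previous
sketch, and imports are again omitted.

    // Print the attempt ids of all attempts of an application.
    static void printAttempts(ApplicationHistoryProtocol historyProtocol,
        ApplicationId appId) throws YarnException, IOException {
      GetApplicationAttemptsRequest request =
          GetApplicationAttemptsRequest.newInstance(appId);
      for (ApplicationAttemptReport attempt :
          historyProtocol.getApplicationAttempts(request)
              .getApplicationAttemptList()) {
        System.out.println(attempt.getApplicationAttemptId());
      }
    }
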
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest -->
+  <class name="GetApplicationReportRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationReportRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application.
+ @return <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the application
+ @param applicationId <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by a client to the <code>ResourceManager</code> to
+ get an {@link ApplicationReport} for an application.</p>
+
+ <p>The request should include the {@link ApplicationId} of the
+ application.</p>
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)
+ @see ApplicationReport]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse -->
+  <class name="GetApplicationReportResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationReportResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationReport</code> for the application.
+ @return <code>ApplicationReport</code> for the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client
+ requesting an application report.</p>
+
+ <p>The response includes an {@link ApplicationReport} which has details such
+ as user, queue, name, host on which the <code>ApplicationMaster</code> is
+ running, RPC port, tracking URL, diagnostics, start time etc.</p>
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse -->
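
And a matching client-side sketch for the application-level report: build the request from an
ApplicationId and read the ApplicationReport out of the response. Here rmClient stands in for
an ApplicationClientProtocol proxy (for example one obtained through ClientRMProxy or wrapped
by YarnClient); imports and error handling are omitted.

    // Ask the ResourceManager for the report of one application.
    static ApplicationReport fetchReport(ApplicationClientProtocol rmClient,
        ApplicationId appId) throws YarnException, IOException {
      GetApplicationReportRequest request =
          GetApplicationReportRequest.newInstance(appId);
      ApplicationReport report =
          rmClient.getApplicationReport(request).getApplicationReport();
      System.out.println(report.getName() + " in queue " + report.getQueue()
          + ", tracking URL " + report.getTrackingUrl());
      return report;
    }
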
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest -->
+  <class name="GetApplicationsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"/>
+      <param name="users" type="java.util.Set"/>
+      <param name="queues" type="java.util.Set"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationTags" type="java.util.Set"/>
+      <param name="appli

<TRUNCATED>

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/25] hadoop git commit: YARN-5082. Limit ContainerId increase in fair scheduler if the num of node app reserved reached the limit (sandflee via asuresh)

Posted by ae...@apache.org.
YARN-5082. Limit ContainerId increase in fair scheduler if the num of node app reserved reached the limit (sandflee via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5279af7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5279af7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5279af7c

Branch: refs/heads/HDFS-1312
Commit: 5279af7cd4afb090da742a96b5786d9dee6224bc
Parents: e0f4620
Author: Arun Suresh <as...@apache.org>
Authored: Fri Jun 10 22:33:42 2016 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Jun 10 22:33:42 2016 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FSAppAttempt.java            | 62 ++++++++++-------
 .../scheduler/fair/TestFairScheduler.java       | 72 ++++++++++++++++++++
 2 files changed, 108 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5279af7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 5b83c9a..634f667 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -73,7 +73,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       = new DefaultResourceCalculator();
 
   private long startTime;
-  private Priority priority;
+  private Priority appPriority;
   private ResourceWeights resourceWeights;
   private Resource demand = Resources.createResource(0);
   private FairScheduler scheduler;
@@ -107,7 +107,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
     this.scheduler = scheduler;
     this.startTime = scheduler.getClock().getTime();
-    this.priority = Priority.newInstance(1);
+    this.appPriority = Priority.newInstance(1);
     this.resourceWeights = new ResourceWeights();
   }
 
@@ -309,7 +309,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     }
 
     // default level is NODE_LOCAL
-    if (! allowedLocalityLevel.containsKey(priority)) {
+    if (!allowedLocalityLevel.containsKey(priority)) {
       // add the initial time of priority to prevent comparing with FsApp
       // startTime and allowedLocalityLevel degrade
       lastScheduledContainer.put(priority, currentTimeMs);
@@ -353,7 +353,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
   synchronized public RMContainer allocate(NodeType type, FSSchedulerNode node,
       Priority priority, ResourceRequest request,
-      Container container) {
+      Container reservedContainer) {
     // Update allowed locality level
     NodeType allowed = allowedLocalityLevel.get(priority);
     if (allowed != null) {
@@ -373,9 +373,15 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     if (getTotalRequiredResources(priority) <= 0) {
       return null;
     }
+
+    Container container = reservedContainer;
+    if (container == null) {
+      container =
+          createContainer(node, request.getCapability(), request.getPriority());
+    }
     
     // Create RMContainer
-    RMContainer rmContainer = new RMContainerImpl(container, 
+    RMContainer rmContainer = new RMContainerImpl(container,
         getApplicationAttemptId(), node.getNodeID(),
         appSchedulingInfo.getUser(), rmContext);
     ((RMContainerImpl)rmContainer).setQueueName(this.getQueueName());
@@ -485,21 +491,26 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
    * in {@link FSSchedulerNode}..
    * return whether reservation was possible with the current threshold limits
    */
-  private boolean reserve(Priority priority, FSSchedulerNode node,
-      Container container, NodeType type, boolean alreadyReserved) {
+  private boolean reserve(ResourceRequest request, FSSchedulerNode node,
+      Container reservedContainer, NodeType type) {
 
+    Priority priority = request.getPriority();
     if (!reservationExceedsThreshold(node, type)) {
       LOG.info("Making reservation: node=" + node.getNodeName() +
               " app_id=" + getApplicationId());
-      if (!alreadyReserved) {
-        getMetrics().reserveResource(getUser(), container.getResource());
+      if (reservedContainer == null) {
+        reservedContainer =
+            createContainer(node, request.getCapability(),
+              request.getPriority());
+        getMetrics().reserveResource(getUser(),
+            reservedContainer.getResource());
         RMContainer rmContainer =
-                super.reserve(node, priority, null, container);
+                super.reserve(node, priority, null, reservedContainer);
         node.reserveResource(this, priority, rmContainer);
         setReservation(node);
       } else {
         RMContainer rmContainer = node.getReservedContainer();
-        super.reserve(node, priority, rmContainer, container);
+        super.reserve(node, priority, rmContainer, reservedContainer);
         node.reserveResource(this, priority, rmContainer);
         setReservation(node);
       }
@@ -615,18 +626,17 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     // How much does the node have?
     Resource available = node.getUnallocatedResource();
 
-    Container container = null;
+    Container reservedContainer = null;
     if (reserved) {
-      container = node.getReservedContainer().getContainer();
-    } else {
-      container = createContainer(node, capability, request.getPriority());
+      reservedContainer = node.getReservedContainer().getContainer();
     }
 
     // Can we allocate a container on this node?
     if (Resources.fitsIn(capability, available)) {
       // Inform the application of the new container for this request
       RMContainer allocatedContainer =
-          allocate(type, node, request.getPriority(), request, container);
+          allocate(type, node, request.getPriority(), request,
+              reservedContainer);
       if (allocatedContainer == null) {
         // Did the application need this resource?
         if (reserved) {
@@ -647,30 +657,30 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       // the AM. Set the amResource for this app and update the leaf queue's AM
       // usage
       if (!isAmRunning() && !getUnmanagedAM()) {
-        setAMResource(container.getResource());
-        getQueue().addAMResourceUsage(container.getResource());
+        setAMResource(capability);
+        getQueue().addAMResourceUsage(capability);
         setAmRunning(true);
       }
 
-      return container.getResource();
+      return capability;
     }
 
     // The desired container won't fit here, so reserve
-    if (isReservable(container) &&
-        reserve(request.getPriority(), node, container, type, reserved)) {
+    if (isReservable(capability) &&
+        reserve(request, node, reservedContainer, type)) {
       return FairScheduler.CONTAINER_RESERVED;
     } else {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Not creating reservation as container " + container.getId()
-            + " is not reservable");
+        LOG.debug("Couldn't creating reservation for " +
+            getName() + ",at priority " +  request.getPriority());
       }
       return Resources.none();
     }
   }
 
-  private boolean isReservable(Container container) {
+  private boolean isReservable(Resource capacity) {
     return scheduler.isAtLeastReservationThreshold(
-      getQueue().getPolicy().getResourceCalculator(), container.getResource());
+        getQueue().getPolicy().getResourceCalculator(), capacity);
   }
 
   private boolean hasNodeOrRackLocalRequests(Priority priority) {
@@ -907,7 +917,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   public Priority getPriority() {
     // Right now per-app priorities are not passed to scheduler,
     // so everyone has the same priority.
-    return priority;
+    return appPriority;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5279af7c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 3e5a40f..ec77a9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -4480,4 +4480,76 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     resourceManager.getResourceScheduler().handle(nodeAddEvent1);
     return nm;
   }
+
+  @Test(timeout = 120000)
+  public void testContainerAllocationWithContainerIdLeap() throws Exception {
+    conf.setFloat(FairSchedulerConfiguration.RESERVABLE_NODES, 0.50f);
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // Add two node
+    RMNode node1 = MockNodes.newNodeInfo(1,
+        Resources.createResource(3072, 10), 1, "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    scheduler.handle(nodeEvent1);
+
+    RMNode node2 = MockNodes.newNodeInfo(1,
+        Resources.createResource(3072, 10), 1, "127.0.0.2");
+    NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
+    scheduler.handle(nodeEvent2);
+
+    ApplicationAttemptId app1 =
+        createSchedulingRequest(2048, "queue1", "user1", 2);
+    scheduler.update();
+    scheduler.handle(new NodeUpdateSchedulerEvent(node1));
+    scheduler.handle(new NodeUpdateSchedulerEvent(node2));
+
+    ApplicationAttemptId app2 =
+        createSchedulingRequest(2048, "queue1", "user1", 1);
+    scheduler.update();
+    scheduler.handle(new NodeUpdateSchedulerEvent(node1));
+    scheduler.handle(new NodeUpdateSchedulerEvent(node2));
+
+    assertEquals(4096, scheduler.getQueueManager().getQueue("queue1").
+        getResourceUsage().getMemory());
+
+    //container will be reserved at node1
+    RMContainer reservedContainer1 =
+        scheduler.getSchedulerNode(node1.getNodeID()).getReservedContainer();
+    assertNotEquals(reservedContainer1, null);
+    RMContainer reservedContainer2 =
+        scheduler.getSchedulerNode(node2.getNodeID()).getReservedContainer();
+    assertEquals(reservedContainer2, null);
+
+    for (int i = 0; i < 10; i++) {
+      scheduler.handle(new NodeUpdateSchedulerEvent(node1));
+      scheduler.handle(new NodeUpdateSchedulerEvent(node2));
+    }
+
+    // release resource
+    scheduler.handle(new AppAttemptRemovedSchedulerEvent(
+        app1, RMAppAttemptState.KILLED, false));
+
+    assertEquals(0, scheduler.getQueueManager().getQueue("queue1").
+        getResourceUsage().getMemory());
+
+    // container will be allocated at node2
+    scheduler.handle(new NodeUpdateSchedulerEvent(node2));
+    assertEquals(scheduler.getSchedulerApp(app2).
+        getLiveContainers().size(), 1);
+
+    long maxId = 0;
+    for (RMContainer container :
+        scheduler.getSchedulerApp(app2).getLiveContainers()) {
+      assertTrue(
+          container.getContainer().getNodeId().equals(node2.getNodeID()));
+      if (container.getContainerId().getContainerId() > maxId) {
+        maxId = container.getContainerId().getContainerId();
+      }
+    }
+
+    long reservedId = reservedContainer1.getContainerId().getContainerId();
+    assertEquals(reservedId + 1, maxId);
+  }
 }
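
The test above pins down the user-visible effect of the change: because a Container (and with
it a fresh container id) is no longer minted up front on every assignment attempt, repeated
heartbeats against an existing or rejected reservation no longer consume ids, so the next
allocated id stays contiguous with the reserved one (reservedId + 1 == maxId in the final
assertion). A self-contained toy illustration of that id-conservation idea (plain Java, not
YARN code, and deliberately simplified: in the real scheduler the first reservation still
creates one container):

    import java.util.concurrent.atomic.AtomicLong;

    public class LazyIdDemo {
      private static final AtomicLong NEXT_ID = new AtomicLong();

      // Reserving mints no id at all in this simplified scheme.
      static void reserve() { /* remember the request, create nothing */ }

      // Allocating mints exactly one id.
      static long allocate() { return NEXT_ID.incrementAndGet(); }

      public static void main(String[] args) {
        long first = allocate();
        for (int i = 0; i < 10; i++) {
          reserve();               // heartbeats that could only reserve
        }
        long second = allocate();  // ids stay contiguous: second == first + 1
        System.out.println(first + " -> " + second);
      }
    }
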


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org