Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2018/01/02 18:15:40 UTC

[01/38] hadoop git commit: HDFS-12930. Remove the extra space in HdfsImageViewer.md. Contributed by Rahul Pathak.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 c5dcf3a6e -> c04492ac8


HDFS-12930. Remove the extra space in HdfsImageViewer.md. Contributed by Rahul Pathak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25a36b74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25a36b74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25a36b74

Branch: refs/heads/HDFS-7240
Commit: 25a36b74528678f56c63be643c76d819d6f07840
Parents: c7499f2
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Dec 19 11:23:16 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Dec 19 11:23:16 2017 +0800

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md               | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a36b74/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 9baadc0..bd3a797 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -99,7 +99,7 @@ The Web processor now supports the following operations:
 * [GETACLSTATUS](./WebHDFS.html#Get_ACL_Status)
 * [GETXATTRS](./WebHDFS.html#Get_an_XAttr)
 * [LISTXATTRS](./WebHDFS.html#List_all_XAttrs)
-* [CONTENTSUMMARY] (./WebHDFS.html#Get_Content_Summary_of_a_Directory)
+* [CONTENTSUMMARY](./WebHDFS.html#Get_Content_Summary_of_a_Directory)
 
 ### XML Processor
 

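For context on the one-character fix above: standard Markdown inline-link syntax requires the bracketed text and the parenthesized URL to be adjacent, so with the extra space the entry rendered as literal text rather than a link:

    * [CONTENTSUMMARY] (./WebHDFS.html#Get_Content_Summary_of_a_Directory)   <- plain text plus a parenthetical
    * [CONTENTSUMMARY](./WebHDFS.html#Get_Content_Summary_of_a_Directory)    <- a working link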

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/38] hadoop git commit: HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.

Posted by ae...@apache.org.
HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d31c9d8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d31c9d8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d31c9d8c

Branch: refs/heads/HDFS-7240
Commit: d31c9d8c495794a803fb20729b5ed6b374e23eb4
Parents: 52babbb
Author: Jitendra Pandey <ji...@apache.org>
Authored: Wed Dec 27 23:17:07 2017 -0800
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Wed Dec 27 23:17:07 2017 -0800

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   |  5 +-
 .../security/TestFixKerberosTicketOrder.java    | 77 ++++++++++++++++++++
 2 files changed, 81 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index f7aea31..726e811 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1253,7 +1253,10 @@ public class UserGroupInformation {
         Object cred = iter.next();
         if (cred instanceof KerberosTicket) {
           KerberosTicket ticket = (KerberosTicket) cred;
-          if (!ticket.getServer().getName().startsWith("krbtgt")) {
+          if (ticket.isDestroyed() || ticket.getServer() == null) {
+            LOG.warn("Ticket is already destroyed, remove it.");
+            iter.remove();
+          } else if (!ticket.getServer().getName().startsWith("krbtgt")) {
             LOG.warn(
                 "The first kerberos ticket is not TGT"
                     + "(the server principal is {}), remove and destroy it.",

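The guard added above reflects how javax.security.auth.kerberos.KerberosTicket behaves: once destroy() has been called, getServer() can return null, so the old startsWith() check dereferenced null. A minimal standalone sketch of the same defensive pattern (hypothetical class and method names, not Hadoop code):

    import java.util.Iterator;
    import javax.security.auth.Subject;
    import javax.security.auth.kerberos.KerberosTicket;

    public final class DestroyedTicketCleaner {
      // Drop destroyed Kerberos tickets from a Subject before anything
      // dereferences ticket.getServer(); mirrors the guard in the fix above.
      public static void removeDestroyed(Subject subject) {
        Iterator<Object> iter = subject.getPrivateCredentials().iterator();
        while (iter.hasNext()) {
          Object cred = iter.next();
          if (cred instanceof KerberosTicket) {
            KerberosTicket ticket = (KerberosTicket) cred;
            if (ticket.isDestroyed() || ticket.getServer() == null) {
              iter.remove(); // a destroyed ticket is useless; discard it
            }
          }
        }
      }
    }
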
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
index 4b75a36..cbea393 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
@@ -155,4 +155,81 @@ public class TestFixKerberosTicketOrder extends KerberosSecurityTestcase {
             .filter(t -> t.getServer().getName().startsWith(server2Protocol))
             .findAny().isPresent());
   }
+
+  @Test
+  public void testWithDestroyedTGT() throws Exception {
+    UserGroupInformation ugi =
+        UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal,
+            keytabFile.getCanonicalPath());
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+      @Override
+      public Void run() throws Exception {
+        SaslClient client = Sasl.createSaslClient(
+            new String[] {AuthMethod.KERBEROS.getMechanismName()},
+            clientPrincipal, server1Protocol, host, props, null);
+        client.evaluateChallenge(new byte[0]);
+        client.dispose();
+        return null;
+      }
+    });
+
+    Subject subject = ugi.getSubject();
+
+    // mark the ticket as destroyed
+    for (KerberosTicket ticket : subject
+        .getPrivateCredentials(KerberosTicket.class)) {
+      if (ticket.getServer().getName().startsWith("krbtgt")) {
+        ticket.destroy();
+        break;
+      }
+    }
+
+    ugi.fixKerberosTicketOrder();
+
+    // verify that after fixing, the tgt ticket should be removed
+    assertFalse("The first ticket is not tgt",
+        subject.getPrivateCredentials().stream()
+            .filter(c -> c instanceof KerberosTicket)
+            .map(c -> ((KerberosTicket) c).getServer().getName()).findFirst()
+            .isPresent());
+
+
+    // should fail as we send a service ticket instead of tgt to KDC.
+    intercept(SaslException.class,
+        () -> ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+          @Override
+          public Void run() throws Exception {
+            SaslClient client = Sasl.createSaslClient(
+                new String[] {AuthMethod.KERBEROS.getMechanismName()},
+                clientPrincipal, server2Protocol, host, props, null);
+            client.evaluateChallenge(new byte[0]);
+            client.dispose();
+            return null;
+          }
+        }));
+
+    // relogin to get a new ticket
+    ugi.reloginFromKeytab();
+
+    // make sure we can get new service ticket after the relogin.
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+      @Override
+      public Void run() throws Exception {
+        SaslClient client = Sasl.createSaslClient(
+            new String[] {AuthMethod.KERBEROS.getMechanismName()},
+            clientPrincipal, server2Protocol, host, props, null);
+        client.evaluateChallenge(new byte[0]);
+        client.dispose();
+        return null;
+      }
+    });
+
+    assertTrue("No service ticket for " + server2Protocol + " found",
+        subject.getPrivateCredentials(KerberosTicket.class).stream()
+            .filter(t -> t.getServer().getName().startsWith(server2Protocol))
+            .findAny().isPresent());
+  }
 }
\ No newline at end of file




[16/38] hadoop git commit: YARN-7032. [ATSv2] NPE while starting hbase co-processor when HBase authorization is enabled. Contributed by Rohith Sharma K S.

Posted by ae...@apache.org.
YARN-7032. [ATSv2] NPE while starting hbase co-processor when HBase authorization is enabled. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62932c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62932c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62932c3

Branch: refs/heads/HDFS-7240
Commit: d62932c3b2fcacc81dc1f5048cdeb60fb0d38504
Parents: 41b5810
Author: Sunil G <su...@apache.org>
Authored: Wed Dec 20 11:31:15 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Dec 20 11:31:15 2017 +0530

----------------------------------------------------------------------
 .../server/timelineservice/storage/flow/FlowRunCoprocessor.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62932c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
index 359eec9..96a7cf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
@@ -98,7 +98,9 @@ public class FlowRunCoprocessor extends BaseRegionObserver {
     if ((attributes != null) && (attributes.size() > 0)) {
       for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
         Tag t = HBaseTimelineStorageUtils.getTagFromAttribute(attribute);
-        tags.add(t);
+        if (t != null) {
+          tags.add(t);
+        }
       }
       byte[] tagByteArray = Tag.fromList(tags);
       NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>(

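The NPE arose because HBaseTimelineStorageUtils.getTagFromAttribute() can return null for attribute keys it has no mapping for (per the JIRA title, this surfaces when HBase authorization injects its own attributes), and the null element blew up later when the tag list was processed. A minimal sketch of the same guard with stand-in types (hypothetical names, not the real coprocessor code):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public final class NullSafeTagConversion {
      // Stand-in for a converter that returns null for attribute keys
      // it does not recognize.
      static String getTagFromAttribute(Map.Entry<String, byte[]> attribute) {
        return attribute.getKey().startsWith("yarn.") ? attribute.getKey() : null;
      }

      static List<String> toTags(Map<String, byte[]> attributes) {
        List<String> tags = new ArrayList<>();
        for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
          String t = getTagFromAttribute(attribute);
          if (t != null) { // skip attributes without a tag mapping
            tags.add(t);
          }
        }
        return tags;
      }
    }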



[33/38] hadoop git commit: YARN-7580. ContainersMonitorImpl logged message lacks detail when exceeding memory limits. Contributed by Wilfred Spiegelenburg.

Posted by ae...@apache.org.
YARN-7580. ContainersMonitorImpl logged message lacks detail when exceeding memory limits. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82049b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82049b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82049b4

Branch: refs/heads/HDFS-7240
Commit: b82049b4f0065b76c3eb590d57eb5bf0ebc2f204
Parents: 6e3e1b8
Author: Miklos Szegedi <sz...@apache.org>
Authored: Fri Dec 29 12:35:49 2017 -0800
Committer: Miklos Szegedi <sz...@apache.org>
Committed: Fri Dec 29 12:49:37 2017 -0800

----------------------------------------------------------------------
 .../monitor/ContainersMonitorImpl.java            | 18 +++++++++++++-----
 .../monitor/TestContainersMonitor.java            |  4 ++--
 2 files changed, 15 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82049b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 48ec147..bc28646 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -650,26 +650,34 @@ public class ContainersMonitorImpl extends AbstractService implements
       if (isVmemCheckEnabled()
               && isProcessTreeOverLimit(containerId.toString(),
               currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, base the delta on the
+        // current usage
+        long delta = currentVmemUsage - vmemLimit;
         // Container (the root process) is still alive and overflowing
         // memory.
         // Dump the process-tree and then clean it up.
         msg = formatErrorMessage("virtual",
                 formatUsageString(currentVmemUsage, vmemLimit,
                   currentPmemUsage, pmemLimit),
-                pId, containerId, pTree);
+                pId, containerId, pTree, delta);
         isMemoryOverLimit = true;
         containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
       } else if (isPmemCheckEnabled()
               && isProcessTreeOverLimit(containerId.toString(),
               currentPmemUsage, curRssMemUsageOfAgedProcesses,
               pmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, base the delta on the
+        // current usage
+        long delta = currentPmemUsage - pmemLimit;
         // Container (the root process) is still alive and overflowing
         // memory.
         // Dump the process-tree and then clean it up.
         msg = formatErrorMessage("physical",
                 formatUsageString(currentVmemUsage, vmemLimit,
                   currentPmemUsage, pmemLimit),
-                pId, containerId, pTree);
+                pId, containerId, pTree, delta);
         isMemoryOverLimit = true;
         containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
       }
@@ -726,11 +734,11 @@ public class ContainersMonitorImpl extends AbstractService implements
      */
     private String formatErrorMessage(String memTypeExceeded,
         String usageString, String pId, ContainerId containerId,
-        ResourceCalculatorProcessTree pTree) {
+        ResourceCalculatorProcessTree pTree, long delta) {
       return
         String.format("Container [pid=%s,containerID=%s] is " +
-            "running beyond %s memory limits. ",
-            pId, containerId, memTypeExceeded) +
+            "running %dB beyond the '%S' memory limit. ",
+            pId, containerId, delta, memTypeExceeded) +
         "Current usage: " + usageString +
         ". Killing container.\n" +
         "Dump of the process-tree for " + containerId + " :\n" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82049b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 5f72a4c..412b8cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -332,8 +332,8 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
     Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,
         containerStatus.getExitStatus());
     String expectedMsgPattern =
-        "Container \\[pid=" + pid + ",containerID=" + cId
-            + "\\] is running beyond virtual memory limits. Current usage: "
+        "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running "
+            + "[0-9]+B beyond the 'VIRTUAL' memory limit. Current usage: "
             + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "
             + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "
             + "Killing container.\nDump of the process-tree for "

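A note on the new format string: '%S' is java.util.Formatter's upper-casing string conversion, which is why the updated test regex expects the literal 'VIRTUAL' even though the code passes "virtual", and '%d' prints the byte delta computed by the fix. A small standalone demonstration (pid and container id invented for the example):

    public class MemoryLimitMessageDemo {
      public static void main(String[] args) {
        long delta = 524288L; // bytes over the limit, invented for the demo
        String msg = String.format(
            "Container [pid=%s,containerID=%s] is running %dB beyond the "
                + "'%S' memory limit. ",
            "12345", "container_1_0001_01_000001", delta, "virtual");
        System.out.println(msg);
        // Container [pid=12345,containerID=container_1_0001_01_000001] is
        // running 524288B beyond the 'VIRTUAL' memory limit.
      }
    }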



[11/38] hadoop git commit: YARN-7620. Allow node partition filters on Queues page of new YARN UI. Contributed by Vasudevan Skm.

Posted by ae...@apache.org.
YARN-7620. Allow node partition filters on Queues page of new YARN UI. Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe5b057c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe5b057c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe5b057c

Branch: refs/heads/HDFS-7240
Commit: fe5b057c8144d01ef9fdfb2639a2cba97ead8144
Parents: e040c97
Author: Sunil G <su...@apache.org>
Authored: Tue Dec 19 20:27:25 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Dec 19 20:27:25 2017 +0530

----------------------------------------------------------------------
 .../webapp/app/components/queue-navigator.js    |  26 +-
 .../main/webapp/app/components/tree-selector.js | 301 ++++++++++++-------
 .../yarn-queue-partition-capacity-labels.js     |  48 +++
 .../src/main/webapp/app/constants.js            |   4 +-
 .../main/webapp/app/controllers/yarn-queues.js  |  36 ++-
 .../app/models/yarn-queue/capacity-queue.js     |  56 ++--
 .../serializers/yarn-queue/capacity-queue.js    |   6 +
 .../src/main/webapp/app/styles/app.scss         |   1 +
 .../src/main/webapp/app/styles/yarn-queues.scss |  29 ++
 .../templates/components/queue-navigator.hbs    |  20 +-
 .../yarn-queue-partition-capacity-labels.hbs    |  54 ++++
 .../components/yarn-queue/capacity-queue.hbs    |  34 +--
 .../main/webapp/app/templates/yarn-queues.hbs   |   3 +-
 ...yarn-queue-partition-capacity-labels-test.js |  43 +++
 14 files changed, 478 insertions(+), 183 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
index 4b741b8..2cecefb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
@@ -16,7 +16,27 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import Ember from "ember";
 
-export default Ember.Component.extend({
-});
\ No newline at end of file
+export default Ember.Component.extend(Ember.TargetActionSupport,{
+  actions: {
+    filterQueuesByPartition(filter) {
+      this.set("filteredPartition", filter);
+      this.sendAction("setFilter", filter);
+    }
+  },
+  didInsertElement: function() {
+    $(".js-filter-queue-by-labels").select2({
+      width: "350px",
+      multiple: false
+    });
+
+    $(".js-filter-queue-by-labels").on("select2:select", e => {
+      this.triggerAction({
+        action: "filterQueuesByPartition",
+        target: this,
+        actionContext: e.params.data.text
+      });
+    });
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index 4645a48..5168c0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -16,19 +16,20 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import Ember from "ember";
+import {PARTITION_LABEL} from '../constants';
 
 const INBETWEEN_HEIGHT = 130;
 
 export default Ember.Component.extend({
   // Map: <queue-name, queue>
-  map : undefined,
+  map: undefined,
 
   // Normalized data for d3
   treeData: undefined,
 
   // folded queues, folded[<queue-name>] == true means <queue-name> is folded
-  foldedQueues: { },
+  foldedQueues: {},
 
   // maxDepth
   maxDepth: 0,
@@ -42,17 +43,23 @@ export default Ember.Component.extend({
   used: undefined,
   max: undefined,
 
+  didUpdateAttrs: function({ oldAttrs, newAttrs }) {
+    if (oldAttrs.filteredPartition.value !== newAttrs.filteredPartition.value) {
+      this.reDraw();
+    }
+  },
   // Init data
   initData: function() {
-    this.map = { };
-    this.treeData = { };
+    this.map = {};
+    this.treeData = {};
     this.maxDepth = 0;
     this.numOfLeafQueue = 0;
 
-    this.get("model")
-      .forEach(function(o) {
+    this.get("model").forEach(
+      function(o) {
         this.map[o.id] = o;
-      }.bind(this));
+      }.bind(this)
+    );
 
     // var selected = this.get("selected");
     this.used = this.get("used");
@@ -81,9 +88,9 @@ export default Ember.Component.extend({
 
   // Init queues
   initQueue: function(queueName, depth, node) {
-    if ((!queueName) || (!this.map[queueName])) {
+    if (!queueName || !this.map[queueName]) {
      // Queue does not exist
-      return;
+      return false;
     }
     if (depth > this.maxDepth) {
       this.maxDepth = this.maxDepth + 1;
@@ -91,6 +98,13 @@ export default Ember.Component.extend({
 
     var queue = this.map[queueName];
 
+    if (
+      this.filteredPartition &&
+      !queue.get("partitions").contains(this.filteredPartition)
+    ) {
+      return false;
+    }
+
     var names = this.getChildrenNamesArray(queue);
 
     node.name = queueName;
@@ -100,14 +114,21 @@ export default Ember.Component.extend({
     if (names.length > 0) {
       node.children = [];
 
-      names.forEach(function(name) {
-        var childQueueData = {};
-        node.children.push(childQueueData);
-        this.initQueue(name, depth + 1, childQueueData);
-      }.bind(this));
+      names.forEach(
+        function(name) {
+          var childQueueData = {};
+          node.children.push(childQueueData);
+          const status = this.initQueue(name, depth + 1, childQueueData);
+          if (!status) {
+            node.children.pop();
+          }
+        }.bind(this)
+      );
     } else {
       this.numOfLeafQueue = this.numOfLeafQueue + 1;
     }
+
+    return true;
   },
 
   update: function(source, root, tree, diagonal) {
@@ -119,141 +140,183 @@ export default Ember.Component.extend({
     var links = tree.links(nodes);
 
     // Normalize for fixed-depth.
-    nodes.forEach(function(d) { d.y = d.depth * 200; });
+    nodes.forEach(function(d) {
+      d.y = d.depth * 200;
+    });
 
     // Update the nodes…
-    var node = this.mainSvg.selectAll("g.node")
-      .data(nodes, function(d) { return d.id || (d.id = ++i); });
+    var node = this.mainSvg.selectAll("g.node").data(nodes, function(d) {
+      return d.id || (d.id = ++i);
+    });
 
     // Enter any new nodes at the parent's previous position.
-    var nodeEnter = node.enter().append("g")
+    var nodeEnter = node
+      .enter()
+      .append("g")
       .attr("class", "node")
-      .attr("transform", function() { return "translate(" + source.y0 + "," + source.x0 + ")"; })
-      .on("click", function(d){
-        if (d.queueData.get("name") !== this.get("selected")) {
-            document.location.href = "#/yarn-queues/" + d.queueData.get("name") + "!";
-        }
-
-        Ember.run.later(this, function () {
-          var treeWidth = this.maxDepth * 200;
-          var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
-          var tree = d3.layout.tree().size([treeHeight, treeWidth]);
-          var diagonal = d3.svg.diagonal()
-            .projection(function(d) { return [d.y, d.x]; });
-
-          this.update(this.treeData, this.treeData, tree, diagonal);
-        }, 100);
-
-      }.bind(this))
-    .on("dblclick", function (d) {
-      document.location.href = "#/yarn-queue/" + d.queueData.get("name") + "/apps";
-    });
+      .attr("transform", function() {
+        return `translate(${source.y0 + 50}, ${source.x0})`;
+      })
+      .on(
+        "click",
+        function(d) {
+          if (d.queueData.get("name") !== this.get("selected")) {
+            document.location.href =
+              "#/yarn-queues/" + d.queueData.get("name") + "!";
+          }
+
+          Ember.run.later(
+            this,
+            function() {
+              var treeWidth = this.maxDepth * 200;
+              var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
+              var tree = d3.layout.tree().size([treeHeight, treeWidth]);
+              var diagonal = d3.svg.diagonal().projection(function(d) {
+                return [d.y + 50, d.x];
+              });
+
+              this.update(this.treeData, this.treeData, tree, diagonal);
+            },
+            100
+          );
+        }.bind(this)
+      )
+      .on("dblclick", function(d) {
+        document.location.href =
+          "#/yarn-queue/" + d.queueData.get("name") + "/apps";
+      });
 
-    nodeEnter.append("circle")
+    nodeEnter
+      .append("circle")
       .attr("r", 1e-6)
-      .style("fill", function(d) {
-        var maxCap = d.queueData.get(this.max);
-        maxCap = maxCap === undefined ? 100 : maxCap;
-        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
-        if (usedCap <= 60.0) {
-          return "mediumaquamarine";
-        } else if (usedCap <= 100.0) {
-          return "coral";
-        } else {
-          return "salmon";
-        }
-      }.bind(this));
+      .style(
+        "fill",
+        function(d) {
+          const usedCapacity = getUsedCapacity(d.queueData.get("partitionMap"), this.filteredPartition);
+          if (usedCapacity <= 60.0) {
+            return "#60cea5";
+          } else if (usedCapacity <= 100.0) {
+            return "#ffbc0b";
+          } else {
+            return "#ef6162";
+          }
+        }.bind(this)
+      );
 
     // append percentage
-    nodeEnter.append("text")
-      .attr("x", function() { return 0; })
+    nodeEnter
+      .append("text")
+      .attr("x", function() {
+        return 0;
+      })
       .attr("dy", ".35em")
       .attr("fill", "white")
-      .attr("text-anchor", function() { return "middle"; })
-      .text(function(d) {
-        var maxCap = d.queueData.get(this.max);
-        maxCap = maxCap === undefined ? 100 : maxCap;
-        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
-        if (usedCap >= 100.0) {
-          return usedCap.toFixed(0) + "%";
-        } else {
-          return usedCap.toFixed(1) + "%";
-        }
-      }.bind(this))
+      .attr("text-anchor", function() {
+        return "middle";
+      })
+      .text(
+        function(d) {
+          const usedCapacity = getUsedCapacity(d.queueData.get("partitionMap"), this.filteredPartition);
+          if (usedCapacity >= 100.0) {
+            return usedCapacity.toFixed(0) + "%";
+          } else {
+            return usedCapacity.toFixed(1) + "%";
+          }
+        }.bind(this)
+      )
       .style("fill-opacity", 1e-6);
 
     // append queue name
-    nodeEnter.append("text")
+    nodeEnter
+      .append("text")
       .attr("x", "0px")
       .attr("dy", "45px")
       .attr("text-anchor", "middle")
-      .text(function(d) { return d.name; })
+      .text(function(d) {
+        return d.name;
+      })
       .style("fill-opacity", 1e-6);
 
     // Transition nodes to their new position.
-    var nodeUpdate = node.transition()
+    var nodeUpdate = node
+      .transition()
       .duration(duration)
-      .attr("transform", function(d) { return "translate(" + d.y + "," + d.x + ")"; });
+      .attr("transform", function(d) {
+        return `translate(${d.y + 50}, ${d.x})`;
+      });
 
-    nodeUpdate.select("circle")
+    nodeUpdate
+      .select("circle")
       .attr("r", 30)
-      .attr("href",
+      .attr("href", function(d) {
+        return "#/yarn-queues/" + d.queueData.get("name");
+      })
+      .style(
+        "stroke-width",
+        function(d) {
+          if (d.queueData.get("name") === this.get("selected")) {
+            return 7;
+          } else {
+            return 2;
+          }
+        }.bind(this)
+      )
+      .style(
+        "stroke",
         function(d) {
-          return "#/yarn-queues/" + d.queueData.get("name");
-        })
-      .style("stroke-width", function(d) {
-        if (d.queueData.get("name") === this.get("selected")) {
-          return 7;
-        } else {
-          return 2;
-        }
-      }.bind(this))
-      .style("stroke", function(d) {
-        if (d.queueData.get("name") === this.get("selected")) {
-          return "gray";
-        } else {
-          return "gray";
-        }
-      }.bind(this));
-
-    nodeUpdate.selectAll("text")
-      .style("fill-opacity", 1);
+          if (d.queueData.get("name") === this.get("selected")) {
+            return "gray";
+          } else {
+            return "gray";
+          }
+        }.bind(this)
+      );
+
+    nodeUpdate.selectAll("text").style("fill-opacity", 1);
 
     // Transition exiting nodes to the parent's new position.
-    var nodeExit = node.exit().transition()
+    var nodeExit = node
+      .exit()
+      .transition()
       .duration(duration)
-      .attr("transform", function() { return "translate(" + source.y + "," + source.x + ")"; })
+      .attr("transform", function() {
+        return `translate(${source.y}, ${source.x})`;
+      })
       .remove();
 
-    nodeExit.select("circle")
-      .attr("r", 1e-6);
+    nodeExit.select("circle").attr("r", 1e-6);
 
-    nodeExit.select("text")
-      .style("fill-opacity", 1e-6);
+    nodeExit.select("text").style("fill-opacity", 1e-6);
 
     // Update the links…
-    var link = this.mainSvg.selectAll("path.link")
-      .data(links, function(d) { return d.target.id; });
+    var link = this.mainSvg.selectAll("path.link").data(links, function(d) {
+      return d.target.id;
+    });
 
     // Enter any new links at the parent's previous position.
-    link.enter().insert("path", "g")
+    link
+      .enter()
+      .insert("path", "g")
       .attr("class", "link")
       .attr("d", function() {
-        var o = {x: source.x0, y: source.y0};
-        return diagonal({source: o, target: o});
+        var o = { x: source.x0, y: source.y0 + 50 };
+        return diagonal({ source: o, target: o });
       });
 
     // Transition links to their new position.
-    link.transition()
+    link
+      .transition()
       .duration(duration)
       .attr("d", diagonal);
 
     // Transition exiting nodes to the parent's new position.
-    link.exit().transition()
+    link
+      .exit()
+      .transition()
       .duration(duration)
       .attr("d", function() {
-        var o = {x: source.x, y: source.y};
-        return diagonal({source: o, target: o});
+        var o = { x: source.x, y: source.y };
+        return diagonal({ source: o, target: o });
       })
       .remove();
 
@@ -267,27 +330,32 @@ export default Ember.Component.extend({
   reDraw: function() {
     this.initData();
 
-    var margin = {top: 20, right: 120, bottom: 20, left: 120};
+    var margin = { top: 20, right: 120, bottom: 20, left: 120 };
     var treeWidth = this.maxDepth * 200;
     var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
     var width = treeWidth + margin.left + margin.right;
     var height = treeHeight + margin.top + margin.bottom;
 
     if (this.mainSvg) {
-      this.mainSvg.remove();
+      this.mainSvg.selectAll("*").remove();
+    } else {
+      this.mainSvg = d3
+        .select("#" + this.get("parentId"))
+        .append("svg")
+        .attr("width", width)
+        .attr("height", height)
+        .attr("class", "tree-selector");
     }
 
-    this.mainSvg = d3.select("#" + this.get("parentId")).append("svg")
-      .attr("width", width)
-      .attr("height", height)
-      .attr("class", "tree-selector")
+    this.mainSvg
       .append("g")
       .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
 
     var tree = d3.layout.tree().size([treeHeight, treeWidth]);
 
-    var diagonal = d3.svg.diagonal()
-      .projection(function(d) { return [d.y, d.x]; });
+    var diagonal = d3.svg.diagonal().projection(function(d) {
+      return [d.y + 50, d.x];
+    });
 
     var root = this.treeData;
     root.x0 = height / 2;
@@ -299,6 +367,11 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-   this.reDraw();
+    this.reDraw();
   }
 });
+
+
+const getUsedCapacity = (partitionMap, filter=PARTITION_LABEL) => {
+  return partitionMap[filter].absoluteUsedCapacity;
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
new file mode 100644
index 0000000..e7f9c03
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from "ember";
+import { PARTITION_LABEL } from "../constants";
+
+export default Ember.Component.extend({
+  didUpdateAttrs: function({ oldAttrs, newAttrs }) {
+    this._super(...arguments);
+    this.set("data", this.initData());
+  },
+
+  init() {
+    this._super(...arguments);
+    this.set("data", this.initData());
+  },
+
+  initData() {
+    const queue = this.get("queue");
+    const partitionMap = this.get("partitionMap");
+    const filteredPartition = this.get("filteredPartition") || PARTITION_LABEL;
+    const userLimit = queue.get("userLimit");
+    const userLimitFactor = queue.get("userLimitFactor");
+    const isLeafQueue = queue.get("isLeafQueue");
+
+    return {
+      ...partitionMap[filteredPartition],
+      userLimit,
+      userLimitFactor,
+      isLeafQueue
+    };
+  }
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
index 29ad4bc..6b37b7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
@@ -34,4 +34,6 @@ export const Entities = {
   Memory:'memory',
   Resource: 'resource',
   Unit: 'unit'
-}
\ No newline at end of file
+}
+
+export const PARTITION_LABEL = 'Default partition';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
index 9658ded..6cc8767 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
@@ -17,19 +17,39 @@
  */
 
 import Ember from 'ember';
+import {PARTITION_LABEL} from '../constants';
 
 export default Ember.Controller.extend({
   needReload: true,
   selectedQueue: undefined,
   showLoading: true,
+  filteredPartition: PARTITION_LABEL,
 
-  breadcrumbs: [{
-    text: "Home",
-    routeName: 'application'
-  }, {
-    text: "Queues",
-    routeName: 'yarn-queues',
-    model: 'root'
-  }]
+  breadcrumbs: [
+    {
+      text: "Home",
+      routeName: "application"
+    },
+    {
+      text: "Queues",
+      routeName: "yarn-queues",
+      model: "root"
+    }
+  ],
 
+  actions: {
+    setFilter(partition) {
+      this.set("filteredPartition", partition);
+      const model = this.get('model');
+      const {selectedQueue} = model;
+      // If the selected queue does not have the filtered partition
+      // reset it to root
+      if (!selectedQueue.get('partitions').contains(partition)) {
+        const root = model.queues.get('firstObject');
+        document.location.href = "#/yarn-queues/" + root.get("id") + "!";
+        this.set("model.selectedQueue", root);
+        this.set("model.selected", root.get('id'));
+      }
+    }
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
index f892c2b..c123989 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
@@ -20,24 +20,26 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.Model.extend({
-  name: DS.attr('string'),
-  children: DS.attr('array'),
-  parent: DS.attr('string'),
-  capacity: DS.attr('number'),
-  maxCapacity: DS.attr('number'),
-  usedCapacity: DS.attr('number'),
-  absCapacity: DS.attr('number'),
-  absMaxCapacity: DS.attr('number'),
-  absUsedCapacity: DS.attr('number'),
-  state: DS.attr('string'),
-  userLimit: DS.attr('number'),
-  userLimitFactor: DS.attr('number'),
-  preemptionDisabled: DS.attr('number'),
-  numPendingApplications: DS.attr('number'),
-  numActiveApplications: DS.attr('number'),
-  users: DS.hasMany('YarnUser'),
-  type: DS.attr('string'),
-  resources: DS.attr('object'),
+  name: DS.attr("string"),
+  children: DS.attr("array"),
+  parent: DS.attr("string"),
+  capacity: DS.attr("number"),
+  partitions: DS.attr("array"),
+  partitionMap: DS.attr("object"),
+  maxCapacity: DS.attr("number"),
+  usedCapacity: DS.attr("number"),
+  absCapacity: DS.attr("number"),
+  absMaxCapacity: DS.attr("number"),
+  absUsedCapacity: DS.attr("number"),
+  state: DS.attr("string"),
+  userLimit: DS.attr("number"),
+  userLimitFactor: DS.attr("number"),
+  preemptionDisabled: DS.attr("number"),
+  numPendingApplications: DS.attr("number"),
+  numActiveApplications: DS.attr("number"),
+  users: DS.hasMany("YarnUser"),
+  type: DS.attr("string"),
+  resources: DS.attr("object"),
 
   isLeafQueue: function() {
     var len = this.get("children.length");
@@ -53,21 +55,29 @@ export default DS.Model.extend({
       {
         label: "Absolute Used",
         style: "primary",
-        value: this.get("name") === "root" ? floatToFixed(this.get("usedCapacity")) : floatToFixed(this.get("absUsedCapacity"))
+        value:
+          this.get("name") === "root"
+            ? floatToFixed(this.get("usedCapacity"))
+            : floatToFixed(this.get("absUsedCapacity"))
       },
       {
         label: "Absolute Capacity",
         style: "primary",
-        value: this.get("name") === "root" ? 100 : floatToFixed(this.get("absCapacity"))
+        value:
+          this.get("name") === "root"
+            ? 100
+            : floatToFixed(this.get("absCapacity"))
       },
       {
         label: "Absolute Max Capacity",
         style: "secondary",
-        value: this.get("name") === "root" ? 100 : floatToFixed(this.get("absMaxCapacity"))
+        value:
+          this.get("name") === "root"
+            ? 100
+            : floatToFixed(this.get("absMaxCapacity"))
       }
     ];
   }.property("absCapacity", "usedCapacity", "absMaxCapacity"),
-
   userUsagesDonutChartData: function() {
     var data = [];
     if (this.get("users")) {
@@ -97,5 +107,5 @@ export default DS.Model.extend({
         value: this.get("numActiveApplications") || 0
       }
     ];
-  }.property("numPendingApplications", "numActiveApplications"),
+  }.property("numPendingApplications", "numActiveApplications")
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
index 7626598..b171c6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
@@ -17,6 +17,7 @@
  */
 
 import DS from 'ember-data';
+import {PARTITION_LABEL} from '../../constants';
 
 export default DS.JSONAPISerializer.extend({
 
@@ -73,6 +74,11 @@ export default DS.JSONAPISerializer.extend({
           numPendingApplications: payload.numPendingApplications,
           numActiveApplications: payload.numActiveApplications,
           resources: payload.resources,
+          partitions: payload.capacities.queueCapacitiesByPartition.map(cap => cap.partitionName || PARTITION_LABEL),
+          partitionMap: payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
+            init[cap.partitionName || PARTITION_LABEL] = cap;
+            return init;
+          }, {}),
           type: "capacity",
         },
         // Relationships

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
index 3919ac3..5d99d8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
@@ -3,6 +3,7 @@
 @import 'yarn-app.scss';
 @import './compose-box.scss';
 @import 'em-table.scss';
+@import './yarn-queues.scss';
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
new file mode 100644
index 0000000..8852270
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.filter-partitions {
+  padding: 15px;
+  margin-left: auto;
+  label {
+    font-weight: 500;
+  }
+  .filter-queue-by-labels {
+    display: inline-block;
+    max-width: 350px;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
index e3b0a90..b063aae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
@@ -21,9 +21,25 @@
   <div class="col-md-12 container-fluid">
     <div class="panel panel-default" id="tree-selector-container">
       <div class="panel-heading">
-        Scheduler: {{model.firstObject.type}}
+        {{#if filteredPartition}}
+           {{model.firstObject.type}} scheduler - Showing queues from partition {{lower filteredPartition}}
+        {{else}}
+          {{model.firstObject.type}} scheduler - Showing queues from all partitions
+        {{/if}}
       </div>
-     {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max}}
+       {{#if (eq model.firstObject.type "capacity")}}
+       <div class="flex">
+        <div class="filter-partitions flex-right">
+          <label><i class="glyphicon glyphicon-filter"/> Partitions: </label>
+            <select onchange={{action "filterQueuesByPartition" value="target.value"}} class="form-control js-filter-queue-by-labels">
+              {{#each model.firstObject.partitions as |part|}}
+                <option value={{part}}>{{part}}</option>
+              {{/each}}
+            </select>
+        </div>
+       </div>
+      {{/if}}
+     {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max filteredPartition=filteredPartition}}
     </div>
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
new file mode 100644
index 0000000..fdecb2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
@@ -0,0 +1,54 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="top-1">
+  <span class="yarn-label primary">
+    <span class="label-key">absolute used</span>
+    <span class="label-value">{{data.absoluteUsedCapacity}}%</span>
+  </span>
+  <span class="yarn-label primary">
+    <span class="label-key">absolute capacity</span>
+    <span class="label-value">{{data.absoluteCapacity}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">absolute max capacity</span>
+    <span class="label-value">{{data.absoluteMaxCapacity}}%</span>
+  </span>
+</div>
+<div class="top-1">
+  <span class="yarn-label secondary">
+    <span class="label-key">configured capacity</span>
+    <span class="label-value">{{data.capacity}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">configured max capacity</span>
+    <span class="label-value">{{data.maxCapacity}}%</span>
+  </span>
+</div>
+{{#if data.isLeafQueue}}
+<div class="top-1">
+  <span class="yarn-label secondary">
+    <span class="label-key">user limit</span>
+    <span class="label-value">{{data.userLimit}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">user limit factor</span>
+    <span class="label-value">{{data.userLimitFactor}}</span>
+  </span>
+</div>
+{{/if}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
index bb9a87e..9ad2a6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
@@ -17,7 +17,7 @@
 }}
 
 {{queue-navigator model=model.queues selected=model.selected
-  used="usedCapacity" max="absMaxCapacity"}}
+  used="usedCapacity" max="absMaxCapacity" setFilter=(action setFilter)}}
 
 <div class="yarn-compose-box yarn-queues-container">
   <div>
@@ -31,36 +31,8 @@
         {{em-table-simple-status-cell content=model.selectedQueue.state}}
       </div>
     {{/if}}
-    <div class="top-1">
-      {{#each model.selectedQueue.capacitiesBarChartData as |item|}}
-        <span class="yarn-label {{item.style}}">
-          <span class="label-key"> {{lower item.label}}</span>
-          <span class="label-value">{{item.value}}%</span>
-        </span>
-      {{/each}}
-    </div>
-    <div class="top-1">
-      <span class="yarn-label secondary">
-        <span class="label-key">configured capacity</span>
-        <span class="label-value">{{model.selectedQueue.capacity}}%</span>
-      </span>
-      <span class="yarn-label secondary">
-        <span class="label-key">configured max capacity</span>
-        <span class="label-value">{{model.selectedQueue.maxCapacity}}%</span>
-      </span>
-    </div>
-    {{#if model.selectedQueue.isLeafQueue}}
-      <div class="top-1">
-        <span class="yarn-label secondary">
-          <span class="label-key">user limit</span>
-          <span class="label-value">{{model.selectedQueue.userLimit}}%</span>
-        </span>
-        <span class="yarn-label secondary">
-          <span class="label-key">user limit factor</span>
-          <span class="label-value">{{model.selectedQueue.userLimitFactor}}</span>
-        </span>
-      </div>
-    {{/if}}
+
+    {{yarn-queue-partition-capacity-labels partitionMap=model.selectedQueue.partitionMap queue=model.selectedQueue filteredPartition=filteredPartition}}
   </div>
 
   <h5> Running Apps </h5>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
index b3165d5..ede2994 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -18,9 +18,10 @@
 <div class="queue-page-breadcrumb">
   {{breadcrumb-bar breadcrumbs=breadcrumbs}}
 </div>
+
 <div class="container-fluid">
   {{#if (eq model.queues.firstObject.type "capacity")}}
-    {{yarn-queue.capacity-queue model=model}}
+    {{yarn-queue.capacity-queue model=model setFilter=(action "setFilter") filteredPartition=filteredPartition}}
   {{else if (eq model.queues.firstObject.type "fair")}}
     {{yarn-queue.fair-queue model=model}}
   {{else}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
new file mode 100644
index 0000000..414e326
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('yarn-queue-partition-capacity-labels', 'Integration | Component | yarn queue partition capacity labels', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{yarn-queue-partition-capacity-labels}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#yarn-queue-partition-capacity-labels}}
+      template block text
+    {{/yarn-queue-partition-capacity-labels}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});




[06/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
new file mode 100644
index 0000000..e96d018
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
@@ -0,0 +1,27495 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:47:49 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Core 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-ap
 i/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.
 jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/
 jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/or
 g/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.
 3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Core 2.8.3 -->
+<package name="org.apache.hadoop.filecache">
+  <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+  <class name="DistributedCache" extends="org.apache.hadoop.mapreduce.filecache.DistributedCache"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DistributedCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add an archive that has been localized to the conf.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="addLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a file that has been localized to the conf.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <method name="createAllSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="jobCacheDir" type="java.io.File"/>
+      <param name="workDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method create symlinks for all files in a given dir in another
+ directory. Currently symlinks cannot be disabled. This is a NO-OP.
+
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException
+ @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns {@link FileStatus} of a given cache file on hdfs. Internal to
+ MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return <code>FileStatus</code> of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns mtime of a given cache file on hdfs. Internal to MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setArchiveTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the archives to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+      </doc>
+    </method>
+    <method name="setFileTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the files to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+      </doc>
+    </method>
+    <method name="setLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized archives.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="setLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized files.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <field name="CACHE_FILES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILE_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALFILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALFILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALFILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_SYMLINK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_SYMLINK} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_SYMLINK}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Distribute application-specific large, read-only files efficiently.
+ 
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+ 
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached 
+ via the {@link org.apache.hadoop.mapred.JobConf}. The
+ <code>DistributedCache</code> assumes that the files specified via urls are
+ already present on the {@link FileSystem} at the path specified by the url
+ and are accessible by every machine in the cluster.</p>
+ 
+ <p>The framework will copy the necessary files onto the slave node before 
+ any tasks for the job are executed on that node. Its efficiency stems from 
+ the fact that the files are only copied once per job and from the ability to 
+ cache archives which are un-archived on the slaves.</p> 
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc. 
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. 
+ Jars may be optionally added to the classpath of the tasks, a rudimentary 
+ software distribution mechanism.  Files have execution permissions.
+ In older versions of Hadoop Map/Reduce, users could optionally ask for symlinks
+ to be created in the working directory of the child task.  In the current 
+ version symlinks are always created.  If the URL does not have a fragment 
+ the name of the file or directory will be used. If multiple files or 
+ directories map to the same link name, the last one added will be used.  All
+ others will not even be downloaded.</p>
+ 
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache 
+ files. Clearly the cache files should not be modified by the application 
+ or externally while the job is executing.</p>
+ 
+ <p>Here is an illustrative example on how to use the 
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+     // Setting up the cache for the application
+     
+     1. Copy the requisite files to the <code>FileSystem</code>:
+     
+     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat  
+     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip  
+     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+     
+     2. Setup the application's <code>JobConf</code>:
+     
+     JobConf job = new JobConf();
+     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), 
+                                   job);
+     DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
+     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
+     
+     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+     or {@link org.apache.hadoop.mapred.Reducer}:
+     
+     public static class MapClass extends MapReduceBase  
+     implements Mapper&lt;K, V, K, V&gt; {
+     
+       private Path[] localArchives;
+       private Path[] localFiles;
+       
+       public void configure(JobConf job) {
+         // Get the cached archives/files
+         File f = new File("./map.zip/some/file/in/zip.txt");
+       }
+       
+       public void map(K key, V value, 
+                       OutputCollector&lt;K, V&gt; output, Reporter reporter) 
+       throws IOException {
+         // Use data from the cached archives/files here
+         // ...
+         // ...
+         output.collect(k, v);
+       }
+     }
+     
+ </pre></blockquote>
+
+ It is also very common to use the DistributedCache by using
+ {@link org.apache.hadoop.util.GenericOptionsParser}.
+
+ This class includes methods that should be used by users
+ (specifically those mentioned in the example above, as well
+ as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
+ as well as methods intended for use by the MapReduce framework
+ (e.g., {@link org.apache.hadoop.mapred.JobClient}).
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient
+ @see org.apache.hadoop.mapreduce.Job]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
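
The DistributedCache walkthrough above maps directly onto the newer org.apache.hadoop.mapreduce.Job API. A minimal sketch, reusing the illustrative /myapp paths from the class doc and assuming the files are already present in HDFS:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;

    public class CacheSetupSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "cache-example");
        // Same effect as DistributedCache.addCacheFile on the deprecated API;
        // the #lookup.dat fragment names the symlink in the task working dir.
        job.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"));
        job.addCacheArchive(new URI("/myapp/map.zip")); // un-archived on each node
        job.addFileToClassPath(new Path("/myapp/mylib.jar"));
      }
    }
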
+<package name="org.apache.hadoop.mapred">
+  <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+  <class name="ClusterStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <method name="getTaskTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of task trackers in the cluster.
+ 
+ @return the number of task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getActiveTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of active task trackers in the cluster.
+ 
+ @return the active task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of blacklisted task trackers in the cluster.
+ 
+ @return the blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return an empty collection of graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return 0, the number of graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of blacklisted task trackers in the cluster.
+ 
+ @return the number of blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getNumExcludedNodes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of excluded hosts in the cluster.
+ @return the number of excluded hosts in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getTTExpiryInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the tasktracker expiry interval for the cluster.
+ @return the expiry interval in msec]]>
+      </doc>
+    </method>
+    <method name="getMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running map tasks in the cluster.
+ 
+ @return the number of currently running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running reduce tasks in the cluster.
+ 
+ @return the number of currently running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+ 
+ @return the maximum capacity for running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+ 
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerStatus" return="org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the JobTracker's status.
+ 
+ @return {@link JobTrackerStatus} of the JobTracker]]>
+      </doc>
+    </method>
+    <method name="getMaxMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getUsedMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getBlackListedTrackersInfo" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the list of blacklisted trackers along with reasons for blacklisting.
+ 
+ @return the collection of {@link BlackListInfo} objects.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ {@link JobTracker.State} should no longer be used on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return the invalid state of the <code>JobTracker</code>.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="UNINITIALIZED_MEMORY_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Status information on the current state of the Map-Reduce cluster.
+ 
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+   <li>
+   Size of the cluster. 
+   </li>
+   <li>
+   Name of the trackers. 
+   </li>
+   <li>
+   Task capacity of the cluster. 
+   </li>
+   <li>
+   The number of currently running map and reduce tasks.
+   </li>
+   <li>
+   State of the <code>JobTracker</code>.
+   </li>
+   <li>
+   Details regarding black listed trackers.
+   </li>
+ </ol>
+ 
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via 
+ {@link JobClient#getClusterStatus()}.</p>
+ 
+ @see JobClient]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
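
As the class doc notes, clients obtain the latest ClusterStatus through JobClient#getClusterStatus(). A minimal sketch, assuming a cluster configuration is available on the classpath:

    import org.apache.hadoop.mapred.ClusterStatus;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());
        ClusterStatus status = client.getClusterStatus();
        // Size and task capacity of the cluster, as enumerated above.
        System.out.println("trackers = " + status.getTaskTrackers());
        System.out.println("maps = " + status.getMapTasks() + "/" + status.getMaxMapTasks());
        System.out.println("reduces = " + status.getReduceTasks() + "/" + status.getMaxReduceTasks());
      }
    }
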
+  <!-- start class org.apache.hadoop.mapred.Counters -->
+  <class name="Counters" extends="org.apache.hadoop.mapreduce.counters.AbstractCounters"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="Counters" type="org.apache.hadoop.mapreduce.Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+    </method>
+    <method name="getGroupNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="makeCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String, String)} instead">
+      <param name="group" type="java.lang.String"/>
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated use {@link #findCounter(String, String)} instead]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="counter" type="java.lang.String"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.
+ @param key the counter enum to lookup
+ @return the counter value or 0 if counter not found]]>
+      </doc>
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #countCounters()} instead">
+      <doc>
+      <![CDATA[@return the total number of counters
+ @deprecated use {@link #countCounters()} instead]]>
+      </doc>
+    </method>
+    <method name="sum" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+      <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Convenience method for computing the sum of two sets of counters.
+ @param a the first counters
+ @param b the second counters
+ @return a new summed counters object]]>
+      </doc>
+    </method>
+    <method name="log"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.apache.commons.logging.Log"/>
+      <doc>
+      <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupName)(group-displayName)[(counterName)(displayName)(value)][]*}*]]>
+      </doc>
+    </method>
+    <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="compactString" type="java.lang.String"/>
+      <exception name="ParseException" type="java.text.ParseException"/>
+      <doc>
+      <![CDATA[Convert a stringified (by {@link #makeEscapedCompactString()}) counter
+ representation into a counter object.
+ @param compactString to parse
+ @return a new counters object
+ @throws ParseException]]>
+      </doc>
+    </method>
+    <field name="MAX_COUNTER_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_GROUP_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters -->
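
Putting the Counters API above together: enum-keyed and string-keyed counters, the static sum() helper, and the escaped compact-string round-trip. A minimal standalone sketch; the enum and group names are illustrative:

    import org.apache.hadoop.mapred.Counters;

    public class CountersSketch {
      enum MyCounter { RECORDS_SEEN }

      public static void main(String[] args) throws Exception {
        Counters a = new Counters();
        a.incrCounter(MyCounter.RECORDS_SEEN, 10);  // counter keyed by an Enum
        a.incrCounter("quality", "bad-records", 2); // counter keyed by group/name
        Counters b = new Counters();
        b.incrCounter(MyCounter.RECORDS_SEEN, 5);

        Counters total = Counters.sum(a, b);
        System.out.println(total.getCounter(MyCounter.RECORDS_SEEN)); // 15

        // Round-trip through the escaped compact-string representation.
        String compact = total.makeEscapedCompactString();
        Counters parsed = Counters.fromEscapedCompactString(compact);
        System.out.println(parsed.getCounter(MyCounter.RECORDS_SEEN)); // 15
      }
    }
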
+  <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+  <class name="Counters.Counter" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.Counter"/>
+    <constructor name="Counters.Counter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+    </method>
+    <method name="increment"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incr" type="long"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]
+ @return the stringified result]]>
+      </doc>
+    </method>
+    <method name="contentEquals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+      <doc>
+      <![CDATA[Checks for (content) equality of two (basic) counters
+ @param counter to compare
+ @return true if content equals
+ @deprecated]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the value of the counter]]>
+      </doc>
+    </method>
+    <method name="getUnderlyingCounter" return="org.apache.hadoop.mapreduce.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A counter record, comprising its name and value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+  <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+  <class name="Counters.Group" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    <constructor name="Counters.Group"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param counterName the name of the counter
+ @return the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings
+ for the counters within.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String)} instead">
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #findCounter(String)} instead]]>
+      </doc>
+    </method>
+    <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="addCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="create" type="boolean"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rightGroup" type="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    </method>
+    <method name="getUnderlyingGroup" return="org.apache.hadoop.mapreduce.counters.CounterGroupBase"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+  counter {@link Enum} class.
+
+  <p><code>Group</code> handles localization of the class name and the
+  counter names.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Group -->
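
Since Counters iterates over its Groups and each Group iterates over its Counters (see the iterator() methods above), dumping every counter is a pair of nested loops. A minimal sketch:

    import org.apache.hadoop.mapred.Counters;

    public class GroupDumpSketch {
      public static void dump(Counters counters) {
        for (Counters.Group group : counters) {  // groups within the Counters set
          System.out.println(group.getDisplayName());
          for (Counters.Counter c : group) {     // counters within the group
            System.out.println("  " + c.getName() + " = " + c.getValue());
          }
        }
      }
    }
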
+  <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <class name="FileAlreadyExistsException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileAlreadyExistsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileAlreadyExistsException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Used when target file already exists for any operation and 
+ is not configured to be overwritten.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+  <class name="FileInputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <constructor name="FileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setMinSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="minSplitSize" type="long"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Is the given filename splittable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ The default implementation in <code>FileInputFormat</code> always returns
+ true. Implementations that may deal with non-splittable files <i>must</i>
+ override this method.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split up
+ so that {@link Mapper}s process entire files.
+ 
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return whether this file is splittable]]>
+      </doc>
+    </method>
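
As the doc above requires, input formats that may encounter stream-compressed files override isSplitable. A minimal sketch that forces any codec-compressed file to be read whole by a single mapper; TextInputFormat already performs a similar check of its own, so this subclass is purely illustrative:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;
    import org.apache.hadoop.mapred.TextInputFormat;

    public class WholeCompressedFileInputFormat extends TextInputFormat {
      @Override
      protected boolean isSplitable(FileSystem fs, Path file) {
        CompressionCodec codec =
            new CompressionCodecFactory(fs.getConf()).getCodec(file);
        return codec == null; // uncompressed files keep the default splitting
      }
    }
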
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setInputPathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="filter" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class to use for filtering the input paths.]]>
+      </doc>
+    </method>
+    <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+      </doc>
+    </method>
+    <method name="addInputPathRecursively"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="result" type="java.util.List"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFilter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add files in the input path recursively into the results.
+ @param result
+          The List to store all files.
+ @param fs
+          The FileSystem.
+ @param path
+          The input path.
+ @param inputFilter
+          The input filter that can be used to filter files/dirs. 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression. 
+ 
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if no input files are found.]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <param name="inMemoryHosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+      </doc>
+    </method>
+    <method name="computeSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="goalSize" type="long"/>
+      <param name="minSize" type="long"/>
+      <param name="blockSize" type="long"/>
+    </method>
+    <method name="getBlockIndex" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the given comma separated paths as the list of inputs 
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as 
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add the given comma separated paths to the list of inputs for
+  the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param commaSeparatedPaths Comma separated paths to be added to
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+      <doc>
+      <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job. 
+ @param inputPaths the {@link Path}s of the input directories/files 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param path {@link Path} to be added to the list of inputs for 
+            the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getSplitHosts" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+      <param name="splitSize" type="long"/>
+      <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This function identifies and returns the hosts that contribute 
+ most to a given split. When calculating the contribution, rack
+ locality is treated on par with host locality, so hosts from racks
+ that contribute the most are preferred over hosts on racks that
+ contribute less.
+ @param blkLocations The list of block locations
+ @param offset 
+ @param splitSize 
+ @return an array of hosts that contribute most to this split
+ @throws IOException]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUM_INPUT_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_RECURSIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A base class for file-based {@link InputFormat}.
+ 
+ <p><code>FileInputFormat</code> is the base class for all file-based 
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+
+ Implementations of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to prevent input files
+ from being split-up in certain situations. Implementations that may
+ deal with non-splittable files <i>must</i> override this method, since
+ the default implementation assumes splitting is always possible.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
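For illustration, a minimal sketch of the isSplitable override described
above, using the old-API TextInputFormat as a base; the class name and
the decision to never split are assumptions, not part of this commit:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.TextInputFormat;

    // Keeps each input file in a single split, so one Mapper
    // processes one whole file.
    public class WholeFileTextInputFormat extends TextInputFormat {
      @Override
      protected boolean isSplitable(FileSystem fs, Path filename) {
        return false;
      }
    }

It would typically be wired up with JobConf.setInputFormat(...) together
with FileInputFormat.setInputPaths(conf, ...) as documented above.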
+  <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+  <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileOutputCommitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <param name="outputPath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <param name="runState" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TEMP_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Temporary directory name]]>
+      </doc>
+    </field>
+    <field name="SUCCEEDED_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link OutputCommitter} that commits files specified
+ in the job output directory, i.e. ${mapreduce.output.fileoutputformat.outputdir}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+  <class name="FileOutputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="FileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setCompressOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="compress" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+      </doc>
+    </method>
+    <method name="getCompressOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+         <code>false</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressorClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="codecClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+                   compress the job outputs]]>
+      </doc>
+    </method>
+    <method name="getOutputCompressorClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the 
+         job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+      <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOutputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for 
+ the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+ 
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+      </doc>
+    </method>
+    <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the task's temporary output directory 
+  for the map-reduce job
+  
+ <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
+ 
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 
+  a <code>FileOutputCommitter</code>, the task's temporary output
+  directory is same as {@link #getOutputPath(JobConf)} i.e.
+  <tt>${mapreduce.output.fileoutputformat.outputdir}</tt></p>
+  
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+ 
+ <p>In such cases there could be issues with 2 instances of the same TIP 
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick 
+ unique names per task-attempt (e.g. using the attemptid, say 
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p> 
+ 
+ <p>To get around this the Map-Reduce framework helps the application-writer 
+ out by maintaining a special 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
+ sub-directory for each task-attempt on HDFS where the output of the 
+ task-attempt goes. On successful completion of the task-attempt the files 
+ in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+ are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
+ framework discards the sub-directory of unsuccessful task-attempts. This 
+ is completely transparent to the application.</p>
+ 
+ <p>The application-writer can take advantage of this by creating any 
+ side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution 
+ of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the 
+ framework will move them out similarly - thus she doesn't have to pick 
+ unique paths per task-attempt.</p>
+ 
+ <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during 
+ execution of a particular task-attempt is actually 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>, and this value is 
+ set by the map-reduce framework. So, just create any side-files in the 
+ path  returned by {@link #getWorkOutputPath(JobConf)} from map/reduce 
+ task to take advantage of this feature.</p>
+ 
+ <p>The entire discussion holds true for maps of jobs with 
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case, 
+ goes directly to HDFS.</p> 
+ 
+ @return the {@link Path} to the task's temporary output directory 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Helper function to create the task's temporary output directory and 
+ return the path to the task's output file.
+ 
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getUniqueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job, the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps or 'r' for
+ reduces, and the task partition number. For example, given the name 'test'
+ running on the first map of the job, the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+      </doc>
+    </method>
+    <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
+
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A base class for {@link OutputFormat}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
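To make the side-effect-file discussion above concrete, a hedged sketch of
a task writing a private side file under its work directory; the file name
and contents are illustrative, and it assumes the job's committer is the
FileOutputCommitter, as the javadoc requires:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;

    // Called from inside a map or reduce task. The file lands in
    // ${mapreduce.task.output.dir} and is promoted to the job output
    // directory only if this task-attempt succeeds.
    static void writeSideFile(JobConf job) throws IOException {
      Path workDir = FileOutputFormat.getWorkOutputPath(job);
      Path sideFile = new Path(workDir, "stats.txt");
      FileSystem fs = sideFile.getFileSystem(job);
      FSDataOutputStream out = fs.create(sideFile);
      out.writeBytes("records=42\n");
      out.close();
    }

getPathForCustomFile(conf, name) can be used instead when the file should
go directly under the job output directory with a per-task unique name.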
+  <!-- start class org.apache.hadoop.mapred.FileSplit -->
+  <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplitWithLocationInfo"/>
+    <constructor name="FileSplit"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null
+ @param inMemoryHosts the list of hosts containing the block in memory]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file containing this split's data.]]>
+      </doc>
+    </method>
+    <method name="getStart" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The position of the first byte in the file to process.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of bytes in the file to process.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A section of an input file.  Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileSplit -->
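For reference, a small sketch of how a RecordReader implementation
typically consumes the FileSplit fields documented above; the variable
and method names here are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileSplit;
    import org.apache.hadoop.mapred.JobConf;

    // Opens the underlying file and seeks to the first byte of the
    // split; the caller reads records until reaching start + length.
    static FSDataInputStream openAt(FileSplit split, JobConf job)
        throws IOException {
      Path file = split.getPath();
      long start = split.getStart();
      FSDataInputStream in = file.getFileSystem(job).open(file);
      in.seek(start);
      return in;
    }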
+  <!-- start class org.apache.hadoop.mapred.FixedLengthInputFormat -->
+  <class name="FixedLengthInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="FixedLengthInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setRecordLength"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="recordLength" type="int"/>
+      <doc>
+      <![CDATA[Set the length of each record
+ @param conf configuration
+ @param recordLength the length of a record]]>
+      </doc>
+    </method>
+    <method name="getRecordLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get record length value
+ @param conf configuration
+ @return the record length, zero means none was set]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <field name="FIXED_RECORD_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FixedLengthInputFormat is an input format used to read input files
+ which contain fixed length records.  The content of a record need not be
+ text.  It can be arbitrary binary data.  Users must configure the record
+ length property by calling:
+ FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
+ conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
+ <br><br>
+ @see FixedLengthRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FixedLengthInputFormat -->
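Putting the configuration note above into code, a minimal old-API job
setup for fixed-length input; the 128-byte record length and the input
path are assumptions:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FixedLengthInputFormat;
    import org.apache.hadoop.mapred.JobConf;

    static JobConf newFixedLengthConf() {
      JobConf conf = new JobConf();
      conf.setInputFormat(FixedLengthInputFormat.class);
      // Every record is exactly 128 bytes; content may be arbitrary binary.
      FixedLengthInputFormat.setRecordLength(conf, 128);
      // Equivalent: conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, 128);
      FileInputFormat.setInputPaths(conf, new Path("/data/fixed-width"));
      return conf;
    }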
+  <!-- start class org.apache.hadoop.mapred.ID -->
+  <class name="ID" extends="org.apache.hadoop.mapreduce.ID"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ID" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructs an ID object from the given int]]>
+      </doc>
+    </constructor>
+    <constructor name="ID"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </const

<TRUNCATED>


[04/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
new file mode 100644
index 0000000..3f6c5eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
@@ -0,0 +1,2316 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:39:30 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Client 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/java
 x/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanuti
 ls-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/dir
 ectory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.
 6.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/
 maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname Apache Hadoop YARN Client 2.8.3 -->
+<package name="org.apache.hadoop.yarn.client">
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <class name="AHSClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSClient" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AHSClient.]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a report of the given Application.
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+   <li>host - set to "N/A"</li>
+   <li>RPC port - set to -1</li>
+   <li>client token - set to "N/A"</li>
+   <li>diagnostics - set to "N/A"</li>
+   <li>tracking URL - set to "N/A"</li>
+   <li>original tracking URL - set to "N/A"</li>
+   <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+ 
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all ApplicationAttempts of an Application in the cluster.
+ </p>
+ 
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AHSClient -->
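A hedged usage sketch for the client above; init/start/stop come from the
AbstractService lifecycle, and the configuration and application id used
here are assumptions:

    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.client.api.AHSClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    static void printAppState() throws Exception {
      AHSClient ahsClient = AHSClient.createAHSClient();
      ahsClient.init(new YarnConfiguration());
      ahsClient.start();
      try {
        // Assumed id: cluster timestamp plus sequence number.
        ApplicationId appId = ApplicationId.newInstance(1512345678000L, 1);
        ApplicationReport report = ahsClient.getApplicationReport(appId);
        System.out.println(report.getYarnApplicationState());
      } finally {
        ahsClient.stop();
      }
    }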
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <class name="AMRMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClient" return="org.apache.hadoop.yarn.client.api.AMRMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AMRMClient.
+ For usage:
+ <pre>
+ {@code
+ AMRMClient.<T>createAMRMClientContainerRequest()
+ }</pre>
+ @return the newly created AMRMClient instance.]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any 
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progressIndicator" type="float"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request additional containers and receive new container allocations.
+ Requests made via <code>addContainerRequest</code> are sent to the
+ <code>ResourceManager</code>. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. The app should not make concurrent allocate
+ requests, as doing so may cause request loss.
+ 
+ <p>
+ Note : If the user has not removed container requests that have already
+ been satisfied, then re-registration may end up re-sending all of the
+ container requests to the RM (including already-matched requests), in
+ which case the RM could end up giving the app many newly allocated
+ containers.
+ </p>
+ 
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called at the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
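Tying the register / addContainerRequest / allocate / unregister contract
above together, a minimal ApplicationMaster heartbeat loop; the host name,
resource sizes, sleep interval and completion test are all assumptions:

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.records.Container;
    import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.client.api.AMRMClient;
    import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    static void runMaster() throws Exception {
      AMRMClient<ContainerRequest> rm = AMRMClient.createAMRMClient();
      rm.init(new YarnConfiguration());
      rm.start();
      rm.registerApplicationMaster("am-host", 0, "");  // first interaction

      rm.addContainerRequest(new ContainerRequest(
          Resource.newInstance(1024, 1),               // 1 GB, 1 vcore
          null, null, Priority.newInstance(0)));

      int allocated = 0;
      while (allocated < 1) {                          // app-specific test
        // allocate() doubles as the periodic heartbeat to the RM.
        AllocateResponse response = rm.allocate(0.5f);
        for (Container c : response.getAllocatedContainers()) {
          allocated++;  // a real AM would launch work on c via NMClient
        }
        Thread.sleep(1000);
      }

      rm.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
      rm.stop();
    }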
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request may
+ have already been sent to the ResourceManager, so the app must be prepared
+ to receive an allocation for the previous request even after the remove
+ request has been made.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="requestContainerResourceChange"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Request container resource change before calling <code>allocate</code>.
+ Any previous pending resource change request of the same container will be
+ removed.
+
+ Application that calls this method is expected to maintain the
+ <code>Container</code>s that are returned from previous successful
+ allocations or resource changes. By passing in the existing container and a
+ target resource capability to this method, the application requests the
+ ResourceManager to change the existing resource allocation to the target
+ resource allocation.
+
+ @param container The container returned from the last successful resource
+                  allocation or resource change
+ @param capability  The target resource capability of the container]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ a container, or wants to give it up, then it can release it. The app needs
+ to make a new request for the released resource capability if it still
+ needs it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made.
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made.
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given 
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating 
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical 
+ <code>Resource</code> size that fit in the given capability. In a 
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of requests matching the parameters]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+ 
+ @param blacklistAdditions list of resources which should be added to the 
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the 
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM token cache for the <code>AMRMClient</code>. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>AMRMClient</code>. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>.
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, polling every
+ <code>checkEveryMillis</code> ms. In the main loop, this method logs the
+ message "waiting in main loop" every <code>logInterval</code> iterations to
+ confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval number of iterations between log messages]]>
+      </doc>
+    </method>
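+    <!--
+      A minimal usage sketch of the waitFor() helpers above. It assumes an
+      initialized AMRMClient named rmClient, plus a completedContainers
+      AtomicInteger and a numTotalContainers count maintained elsewhere;
+      these names are placeholders.
+
+        rmClient.waitFor(new com.google.common.base.Supplier<Boolean>() {
+          @Override
+          public Boolean get() {
+            // Done once every requested container has completed.
+            return completedContainers.get() >= numTotalContainers;
+          }
+        }, 1000, 10);  // poll every 1000 ms, log every 10 checks
+    -->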
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
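+  <!--
+    A hedged end-to-end sketch of the AMRMClient lifecycle documented above:
+    register, addContainerRequest, allocate, releaseAssignedContainer,
+    unregister. The Configuration (conf), host name and tracking URL values
+    are placeholders.
+
+      AMRMClient<AMRMClient.ContainerRequest> rmClient =
+          AMRMClient.createAMRMClient();
+      rmClient.init(conf);
+      rmClient.start();
+      rmClient.registerApplicationMaster("appHost", 0, "");
+
+      // Ask for one 1 GiB / 1 vcore container anywhere in the cluster.
+      AMRMClient.ContainerRequest request = new AMRMClient.ContainerRequest(
+          Resource.newInstance(1024, 1), null, null, Priority.newInstance(0));
+      rmClient.addContainerRequest(request);
+
+      // Heartbeat until the allocation arrives.
+      List<Container> allocated = new ArrayList<Container>();
+      while (allocated.isEmpty()) {
+        AllocateResponse response = rmClient.allocate(0.1f);
+        allocated.addAll(response.getAllocatedContainers());
+        Thread.sleep(1000);
+      }
+
+      // Give a container back once it is no longer needed.
+      rmClient.releaseAssignedContainer(allocated.get(0).getId());
+      rmClient.unregisterApplicationMaster(
+          FinalApplicationStatus.SUCCEEDED, "", "");
+  -->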
+  <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
+  <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="startContainer" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Start an allocated container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.</p>
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+                               <code>NodeManager</code> to launch the
+                               container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="increaseContainerResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Increase the resource of a container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the container, including the Id and
+ the target resource encapsulated in the updated container token via
+ {@link Container}.
+ </p>
+
+ @param container the container with updated token
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Stop a started container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+ 
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Query the status of a container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+ 
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupRunningContainersOnStop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Set whether the containers that are started by this client and are
+ still running should be stopped when the client stops. By default, the
+ feature is enabled.</p> However, containers will be stopped only when the
+ service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM Token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMClient -->
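+  <!--
+    A minimal sketch of the NMClient flow documented above: start, query and
+    stop a container. It assumes a Container (container) previously obtained
+    from AMRMClient#allocate and a placeholder Configuration (conf).
+
+      NMClient nmClient = NMClient.createNMClient();
+      nmClient.init(conf);
+      nmClient.start();
+
+      // Describe what the container should run.
+      ContainerLaunchContext ctx =
+          Records.newRecord(ContainerLaunchContext.class);
+      ctx.setCommands(Collections.singletonList("sleep 30"));
+
+      nmClient.startContainer(container, ctx);
+      ContainerStatus status = nmClient.getContainerStatus(
+          container.getId(), container.getNodeId());
+      nmClient.stopContainer(container.getId(), container.getNodeId());
+  -->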
+  <!-- start class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <class name="NMTokenCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a NM token cache instance.]]>
+      </doc>
+    </constructor>
+    <method name="getSingleton" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the singleton NM token cache.
+
+ @return the singleton NM token cache.]]>
+      </doc>
+    </method>
+    <method name="getNMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent. Only the singleton obtained from
+ {@link #getSingleton()} is looked at for the tokens. If you are using your
+ own NMTokenCache that is different from the singleton, use
+ {@link #getToken(String) }
+ 
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node manager]]>
+      </doc>
+    </method>
+    <method name="setNMToken"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address only in the singleton obtained from
+ {@link #getSingleton()}. If you are using your own NMTokenCache that is
+ different from the singleton, use {@link #setToken(String, Token) }
+ 
+ @param nodeAddr
+          node address (host:port)
+ @param token
+          NMToken]]>
+      </doc>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node
+         manager]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address
+ @param nodeAddr node address (host:port)
+ @param token NMToken]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NMTokenCache manages NMTokens required for an Application Master
+ communicating with individual NodeManagers.
+ <p>
+ By default, the Yarn client libraries {@link AMRMClient} and {@link NMClient}
+ use the {@link #getSingleton()} instance of the cache.
+ <ul>
+   <li>
+     Using the singleton instance of the cache is appropriate when running a
+     single ApplicationMaster in the same JVM.
+   </li>
+   <li>
+     When using the singleton, users don't need to do anything special,
+     {@link AMRMClient} and {@link NMClient} are already set up to use the
+     default singleton {@link NMTokenCache}
+     </li>
+ </ul>
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+ <ul>
+   <li>
+     If using the {@link AMRMClient} and the {@link NMClient}, setting up
+     and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+     setting up and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using {@link ApplicationMasterProtocol} and
+     {@link ContainerManagementProtocol} directly, setting up and using an
+     instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   ...
+   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+   ...
+   AllocateRequest allocateRequest = ...
+   ...
+   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+   for (NMToken token : allocateResponse.getNMTokens()) {
+     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+   }
+   ...
+   ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+   ...
+   nmPro.startContainer(container, containerContext);
+   ...
+ </pre>
+   </li>
+ </ul>
+ It is also possible to mix the usage of a client ({@code AMRMClient} or
+ {@code NMClient}, or the async versions of them) with a protocol proxy
+ ({@code ContainerManagementProtocolProxy} or
+ {@code ApplicationMasterProtocol}).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <!-- start class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
+  <class name="SharedCacheClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createSharedCacheClient" return="org.apache.hadoop.yarn.client.api.SharedCacheClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="use" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to claim a resource with the <code>SharedCacheManager</code>.
+ The client uses a checksum to identify the resource and an
+ {@link ApplicationId} to identify which application will be using the
+ resource.
+ </p>
+ 
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>Path</code>
+ to the resource in the shared cache is returned. If the resource does not
+ exist, null is returned instead.
+ </p>
+ 
+ @param applicationId ApplicationId of the application using the resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource
+ @return Path to the resource, or null if it does not exist]]>
+      </doc>
+    </method>
+    <method name="release"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to release a resource with the <code>SharedCacheManager</code>.
+ This method is called once an application is no longer using a claimed
+ resource in the shared cache. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application is
+ releasing the resource.
+ </p>
+ 
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+ 
+ @param applicationId ApplicationId of the application releasing the
+          resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sourceFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A convenience method to calculate the checksum of a specified file.
+ 
+ @param sourceFile A path to the input file
+ @return A hex string containing the checksum digest
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the client for YARN's shared cache.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
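+  <!--
+    A hedged sketch of the shared-cache flow documented above: checksum a
+    local resource, try to claim it, and release it when done. The Path,
+    ApplicationId (appId) and Configuration (conf) values are placeholders.
+
+      SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
+      scClient.init(conf);
+      scClient.start();
+
+      String checksum = scClient.getFileChecksum(new Path("/tmp/job.jar"));
+      Path cached = scClient.use(appId, checksum);
+      if (cached == null) {
+        // Cache miss: fall back to uploading the resource normally.
+      }
+      // ... once the application no longer needs the resource:
+      scClient.release(appId, checksum);
+      scClient.stop();
+  -->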
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <class name="YarnClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of YarnClient.]]>
+      </doc>
+    </method>
+    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+ </p>
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Submit a new application to <code>YARN</code>. It is a blocking call: it
+ will not return the {@link ApplicationId} until the application has been
+ submitted successfully and accepted by the ResourceManager.
+ </p>
+ 
+ <p>
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+ </p>
+
+ <p>This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}</p>
+
+ @param appContext
+          {@link ApplicationSubmissionContext} containing all the details
+          needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+      </doc>
+    </method>
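+    <!--
+      A minimal submission sketch tying createApplication() and
+      submitApplication() together, as described above. The AM container
+      spec (amContainer) and Configuration (conf) are assumed to be built
+      elsewhere; both names are placeholders.
+
+        YarnClient yarnClient = YarnClient.createYarnClient();
+        yarnClient.init(conf);
+        yarnClient.start();
+
+        YarnClientApplication app = yarnClient.createApplication();
+        ApplicationSubmissionContext appContext =
+            app.getApplicationSubmissionContext();
+        appContext.setApplicationName("example-app");
+        appContext.setResource(Resource.newInstance(1024, 1));
+        appContext.setAMContainerSpec(amContainer);
+
+        // Blocks until the ResourceManager has accepted the application.
+        ApplicationId appId = yarnClient.submitApplication(appContext);
+    -->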
+    <method name="failApplicationAttempt"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Fail an application attempt identified by the given ID.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the attempt to fail.
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by the given ID.
+ </p>
+ 
+ @param applicationId
+          {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by the given ID.
+ </p>
+ @param applicationId {@link ApplicationId} of the application that needs to
+          be killed
+ @param diagnostics the diagnostic message for killing the application.
+ @throws YarnException in case of errors or if YARN rejects the request due
+           to access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p>
+ The AMRM token is required for AM to RM scheduling operations. For 
+ managed Application Masters Yarn takes care of injecting it. For unmanaged
+ Applications Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Otherwise, this method returns null.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queues" type="java.util.Set"/>
+      <param name="users" type="java.util.Set"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given users,
+ queues, application types and application states in the cluster. If any of
+ the params is set to null, it is not used when filtering.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param queues set of queues you are interested in
+ @param users set of users you are interested in
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
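+    <!--
+      A short sketch of report filtering with the getApplications overloads
+      above; the yarnClient instance is assumed to be initialized and
+      started already.
+
+        EnumSet<YarnApplicationState> states = EnumSet.of(
+            YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING);
+        List<ApplicationReport> reports = yarnClient.getApplications(
+            Collections.singleton("MAPREDUCE"), states);
+        for (ApplicationReport report : reports) {
+          System.out.println(report.getApplicationId() + " "
+              + report.getProgress());
+        }
+    -->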
+    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+ </p>
+ 
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+ </p>
+ 
+ @param states The {@link NodeState}s to filter on. If no filter states are
+          given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to YARN using those tokens.
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+         talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about a given <em>queue</em>.
+ </p>
+ 
+ @param queueName
+          Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy.
+ </p>
+ 
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about top level queues.
+ </p>
+ 
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all the immediate child queues
+ of the given queue.
+ </p>
+ 
+ @param parent
+          Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+         of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information about <em>acls</em> for the <em>current user</em> on all the
+ existing queues.
+ </p>
+ 
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+         the <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get reports of all ApplicationAttempts of an Application in the cluster.
+ </p>
+ 
+ @param applicationId application id of the app
+ @return a list of reports for all application attempts of the specified
+         application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get reports of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId application attempt id
+ @return a list of reports of all containers for the specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Attempts to move the given application to the given queue.
+ </p>
+ 
+ @param appId
+    Application to move.
+ @param queue
+    Queue to place it in.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createReservation" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link GetNewReservationResponse} for a new reservation,
+ which contains the {@link ReservationId} object.
+ </p>
+
+ @return The {@link GetNewReservationResponse} containing a new
+         {@link ReservationId} object.
+ @throws YarnException if reservation cannot be created.
+ @throws IOException if reservation cannot be created.]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+ 
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+ 
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies
+ that the user's request can be fulfilled, and that it respects a sharing
+ policy (e.g., {@code CapacityOverTimePolicy}). Once it has positively
+ determined that the ReservationRequest is satisfiable, the
+ {@code ResourceManager} answers with a {@link ReservationSubmissionResponse}
+ that includes a {@link ReservationId}. Upon failure to find a valid
+ allocation, the response is an exception with a message detailing the
+ reason for the failure.
+ </p>
+ 
+ <p>
+ The semantics guarantee that the {@link ReservationId} returned
+ corresponds to a valid reservation existing in the time range requested by
+ the user. The amount of capacity dedicated to such a reservation can vary
+ over time, depending on the allocation that has been determined, but it is
+ guaranteed to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+ </p>
+ 
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+         submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user who has
+ previously submitted a Reservation requests a change to its allocation.
+ </p>
+ 
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+ </p>
+ 
+ @param request to update an existing Reservation (the
+          {@link ReservationUpdateRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ </p>
+ 
+ @param request to remove an existing Reservation (the
+          {@link ReservationDeleteRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listReservations" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the list of reservations in a plan.
+ The reservationId will be used to search for reservations to list if it is
+ provided. Otherwise, it will select active reservations within the
+ startTime and endTime (inclusive).
+ </p>
+
+ @param request to list reservations in a plan. Contains fields to select
+                String queue, ReservationId reservationId, long startTime,
+                long endTime, and a bool includeReservationAllocations.
+
+                queue: Required. Cannot be null or empty. Refers to the
+                reservable queue in the scheduler that was selected when
+                creating a reservation submission
+                {@link ReservationSubmissionRequest}.
+
+                reservationId: Optional. If provided, other fields will
+                be ignored.
+
+                startTime: Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.
+
+                endTime: Optional. If provided, only reservations that
+                start on or before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.
+
+                includeReservationAllocations: Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@link ReservationDefinition}.
+
+ @return response that contains information about reservations that are
+                being searched for.
+ @throws YarnException if the request is invalid
+ @throws IOException if the request failed otherwise]]>
+      </doc>
+    </method>
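+    <!--
+      A hedged sketch of the reservation lifecycle documented above: obtain
+      an id, submit, and later delete. The ReservationDefinition (definition)
+      is a placeholder, and "reservable-queue" must match a reservable queue
+      configured in the scheduler.
+
+        GetNewReservationResponse newReservation =
+            yarnClient.createReservation();
+        ReservationId reservationId = newReservation.getReservationId();
+
+        ReservationSubmissionRequest submission =
+            ReservationSubmissionRequest.newInstance(
+                definition, "reservable-queue", reservationId);
+        yarnClient.submitReservation(submission);
+
+        // ... when the reservation is no longer needed:
+        yarnClient.deleteReservation(
+            ReservationDeleteRequest.newInstance(reservationId));
+    -->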
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node-to-labels mappings in the
+ existing cluster.
+ </p>
+ 
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the labels-to-nodes mapping
+ in the existing cluster.
+ </p>
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the labels-to-nodes mapping
+ for the specified labels in an existing cluster
+ </p>
+
+ @param labels the labels for which the labels-to-nodes mapping is to be retrieved
+ @return labels to nodes mappings for the specified labels
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node labels in the cluster
+ </p>
+
+ @return cluster node labels collection
+ @throws YarnException when there is a failure in
+           {@link ApplicationClientProtocol}
+ @throws IOException when there is a failure in
+           {@link ApplicationClientProtocol}]]>
+      </doc>
+    </method>
+    <method name="updateApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to set the priority of an application
+ </p>
+ @param applicationId
+ @param priority
+ @return updated priority of an application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="signalToContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.SignalContainerCommand"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Signal a container identified by the given ID.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs to be signaled
+ @param command the signal container command
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+  <class name="YarnClientApplication" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClientApplication" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse, org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNewApplicationResponse" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async">
+  <!-- start class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <class name="AMRMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param client the AMRMClient instance
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="setHeartbeatInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interval" type="int"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this application master with the resource manager. On successful
+ registration, starts the heartbeating thread.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called at the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>

<TRUNCATED>
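
The jdiff excerpt above documents, among others, YarnClient's reservation
listing, node-label queries, application-priority updates, and container
signalling. A minimal sketch of driving those calls follows; it assumes a
ResourceManager reachable through the default configuration, and the queue
name, application id, and container id are hypothetical placeholders, not
values taken from this commit.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      // List reservations in a reservable queue (queue name is a placeholder).
      ReservationListResponse reservations = client.listReservations(
          ReservationListRequest.newInstance("reservable-queue", null,
              0, Long.MAX_VALUE, false));

      // Cluster-wide node label listing, as documented above.
      List<NodeLabel> labels = client.getClusterNodeLabels();

      // Raise the priority of a hypothetical application.
      Priority updated = client.updateApplicationPriority(
          ApplicationId.newInstance(1514925538472L, 1),
          Priority.newInstance(10));

      // Ask a hypothetical container for a thread dump.
      client.signalToContainer(
          ContainerId.fromString("container_1514925538472_0001_01_000002"),
          SignalContainerCommand.OUTPUT_THREAD_DUMP);
    } finally {
      client.stop();
    }
  }
}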



[14/38] hadoop git commit: YARN-7466. Addendum patch for failing unit test. (Contributed by Chandni Singh)

YARN-7466. Addendum patch for failing unit test. (Contributed by Chandni Singh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94a2ac6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94a2ac6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94a2ac6b

Branch: refs/heads/HDFS-7240
Commit: 94a2ac6b719913aa698b66bf40b7ebbe6fa606da
Parents: 989c751
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 18:42:27 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 18:42:27 2017 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94a2ac6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index a698ecf..12dfe18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -177,7 +177,7 @@ public class MockAM {
       List<ContainerId> releases, String labelExpression) throws Exception {
     List<ResourceRequest> reqs =
         createReq(new String[] { host }, memory, priority, numContainers,
-            labelExpression, 0L);
+            labelExpression, -1);
     return allocate(reqs, releases);
   }
   




[27/38] hadoop git commit: HDFS-12959. Fix TestOpenFilesWithSnapshot redundant configurations.

HDFS-12959. Fix TestOpenFilesWithSnapshot redundant configurations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76e664e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76e664e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76e664e9

Branch: refs/heads/HDFS-7240
Commit: 76e664e931bf0784620b69bc588bd51cf2a024e6
Parents: 826507c
Author: Manoj Govindassamy <ma...@apache.org>
Authored: Thu Dec 21 15:47:15 2017 -0800
Committer: Manoj Govindassamy <ma...@apache.org>
Committed: Thu Dec 21 15:47:15 2017 -0800

----------------------------------------------------------------------
 .../server/namenode/snapshot/TestOpenFilesWithSnapshot.java   | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76e664e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index be118a3..17082a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -66,7 +66,8 @@ public class TestOpenFilesWithSnapshot {
   public void setup() throws IOException {
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
     conf.set("dfs.blocksize", "1048576");
     fs = cluster.getFileSystem();
   }
@@ -252,8 +253,6 @@ public class TestOpenFilesWithSnapshot {
    */
   @Test (timeout = 120000)
   public void testPointInTimeSnapshotCopiesForOpenFiles() throws Exception {
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
-        true);
     // Construct the directory tree
     final Path level0A = new Path("/level_0_A");
     final Path level0B = new Path("/level_0_B");
@@ -738,8 +737,6 @@ public class TestOpenFilesWithSnapshot {
    */
   @Test (timeout = 120000)
   public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception {
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
-        true);
     // Construct the directory tree
     final Path dir = new Path("/A/B/C");
     fs.mkdirs(dir);
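
For context, the flag being de-duplicated here controls whether files that
are still open for write are captured in snapshots. A minimal sketch of the
behavior under test, against a hypothetical MiniDFSCluster (paths and
values are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class OpenFileSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Set once at setup time; the per-test re-settings removed above were redundant.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/dir");
      fs.mkdirs(dir);
      fs.allowSnapshot(dir);
      FSDataOutputStream out = fs.create(new Path(dir, "open.log"));
      out.write(1);
      out.hflush();                     // file is still open for write
      fs.createSnapshot(dir, "s0");     // yet it is captured in the snapshot
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}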




[24/38] hadoop git commit: HADOOP-13282. S3 blob etags to be made visible in S3A status/getFileChecksum() calls. Contributed by Steve Loughran

HADOOP-13282. S3 blob etags to be made visible in S3A status/getFileChecksum() calls.
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8ff0cc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8ff0cc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8ff0cc3

Branch: refs/heads/HDFS-7240
Commit: c8ff0cc304f07bf793192291e0611b2fb4bcc4e3
Parents: ef450df
Author: Steve Loughran <st...@apache.org>
Authored: Thu Dec 21 14:58:58 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Dec 21 14:58:58 2017 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/store/EtagChecksum.java    |  90 +++++++++++++
 .../apache/hadoop/fs/store/package-info.java    |  28 ++++
 .../hadoop/fs/store/TestEtagChecksum.java       |  85 ++++++++++++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  40 ++++++
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   | 133 ++++++++++++++++---
 5 files changed, 359 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
new file mode 100644
index 0000000..cc29f1b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.store;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.hadoop.fs.FileChecksum;
+
+/**
+ * An etag as a checksum.
+ * Consider these suitable for checking if an object has changed, but
+ * not suitable for comparing two different objects for equivalence,
+ * especially between object stores.
+ */
+public class EtagChecksum extends FileChecksum {
+
+  /** The algorithm name: {@value}. */
+  private static final String ETAG = "etag";
+
+  /**
+   * Etag string.
+   */
+  private String eTag = "";
+
+  /**
+   * Create with an empty etag.
+   */
+  public EtagChecksum() {
+  }
+
+  /**
+   * Create with a string etag.
+   * @param eTag etag
+   */
+  public EtagChecksum(String eTag) {
+    this.eTag = eTag;
+  }
+
+  @Override
+  public String getAlgorithmName() {
+    return ETAG;
+  }
+
+  @Override
+  public int getLength() {
+    return eTag.getBytes(StandardCharsets.UTF_8).length;
+  }
+
+  @Override
+  public byte[] getBytes() {
+    return eTag != null
+        ? eTag.getBytes(StandardCharsets.UTF_8)
+        : new byte[0];
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeUTF(eTag != null ? eTag : "");
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    eTag = in.readUTF();
+  }
+
+  @Override
+  public String toString() {
+    return "etag: \"" + eTag  + '"';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
new file mode 100644
index 0000000..ebe1db4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package is for classes to be shared across object stores; for internal
+ * use within the hadoop-* modules only. No stability guarantees.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.store;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
new file mode 100644
index 0000000..ef9613f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.store;
+
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+/**
+ * Unit test of etag operations.
+ */
+public class TestEtagChecksum extends Assert {
+
+  private final EtagChecksum empty1 = tag("");
+  private final EtagChecksum empty2 = tag("");
+  private final EtagChecksum valid1 = tag("valid");
+  private final EtagChecksum valid2 = tag("valid");
+
+  @Test
+  public void testEmptyTagsEqual() {
+    assertEquals(empty1, empty2);
+  }
+
+  @Test
+  public void testEmptyTagRoundTrip() throws Throwable {
+    assertEquals(empty1, roundTrip(empty1));
+  }
+
+  @Test
+  public void testValidTagsEqual() {
+    assertEquals(valid1, valid2);
+  }
+
+  @Test
+  public void testValidTagRoundTrip() throws Throwable {
+    assertEquals(valid1, roundTrip(valid1));
+  }
+
+  @Test
+  public void testValidAndEmptyTagsDontMatch() {
+    assertNotEquals(valid1, empty1);
+    assertNotEquals(valid1, tag("other valid one"));
+  }
+
+  @Test
+  public void testDifferentTagsDontMatch() {
+    assertNotEquals(valid1, tag("other valid one"));
+  }
+
+  private EtagChecksum tag(String t) {
+    return new EtagChecksum(t);
+  }
+
+  private EtagChecksum roundTrip(EtagChecksum tag) throws IOException {
+    try (DataOutputBuffer dob = new DataOutputBuffer();
+         DataInputBuffer dib = new DataInputBuffer()) {
+      tag.write(dob);
+      dib.reset(dob.getData(), dob.getLength());
+      EtagChecksum t2 = new EtagChecksum();
+      t2.readFields(dib);
+      return t2;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f461c9e..a8147ed 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -112,6 +112,7 @@ import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;
 import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.apache.hadoop.fs.s3native.S3xLoginHelper;
 import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.fs.store.EtagChecksum;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 import org.apache.hadoop.util.Progressable;
@@ -539,6 +540,14 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Get the encryption algorithm of this endpoint.
+   * @return the encryption algorithm.
+   */
+  public S3AEncryptionMethods getServerSideEncryptionAlgorithm() {
+    return serverSideEncryptionAlgorithm;
+  }
+
+  /**
    * Demand create the directory allocator, then create a temporary file.
    * {@link LocalDirAllocator#createTmpFileForWrite(String, long, Configuration)}.
    *  @param pathStr prefix for the temporary file
@@ -1069,6 +1078,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException IO and object access problems.
    */
   @VisibleForTesting
+  @Retries.RetryRaw
   public ObjectMetadata getObjectMetadata(Path path) throws IOException {
     return getObjectMetadata(pathToKey(path));
   }
@@ -2935,6 +2945,36 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Get the etag of an object at the path via a HEAD request and return it
+   * as a checksum object. This has whatever guarantees about equivalence
+   * the S3 implementation offers.
+   * <ol>
+   *   <li>If a tag has not changed, consider the object unchanged.</li>
+   *   <li>Two tags being different does not imply the data is different.</li>
+   * </ol>
+   * Different S3 implementations may offer different guarantees.
+   * @param f The file path
+   * @param length The length of the file range for checksum calculation
+   * @return The EtagChecksum or null if checksums are not supported.
+   * @throws IOException IO failure
+   * @see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html">Common Response Headers</a>
+   */
+
+  public EtagChecksum getFileChecksum(Path f, final long length)
+      throws IOException {
+    Preconditions.checkArgument(length >= 0);
+    Path path = qualify(f);
+    LOG.debug("getFileChecksum({})", path);
+    return once("getFileChecksum", path.toString(),
+        () -> {
+          // this always does a full HEAD to the object
+          ObjectMetadata headers = getObjectMetadata(path);
+          String eTag = headers.getETag();
+          return eTag != null ? new EtagChecksum(eTag) : null;
+        });
+  }
+
+  /**
    * {@inheritDoc}.
    *
    * This implementation is optimized for S3, which can do a bulk listing
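
Taken together, the getFileChecksum(Path, long) call added above and
EtagChecksum give callers a cheap change-detection primitive under the
semantics spelled out in the javadoc: an equal etag means "unchanged",
while unequal etags prove nothing about the bytes. A minimal sketch of that
use; the bucket and key are hypothetical:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.store.EtagChecksum;

public class EtagSketch {
  public static void main(String[] args) throws Exception {
    S3AFileSystem fs = (S3AFileSystem) FileSystem.get(
        new URI("s3a://example-bucket/"), new Configuration());
    Path obj = new Path("/data/part-00000");
    EtagChecksum before = fs.getFileChecksum(obj, 0);
    // ... another process may rewrite the object here ...
    EtagChecksum after = fs.getFileChecksum(obj, 0);
    if (before != null && before.equals(after)) {
      // Same etag: treat the object as unchanged.
    } else {
      // Differing etags do not prove the data changed; re-read to be sure.
    }
  }
}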

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
index 869d64c..ddf2529 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
+import java.io.ByteArrayInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 
 import com.amazonaws.services.s3.model.ObjectMetadata;
 import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
+import org.junit.Assume;
 import org.junit.Test;
 
-import java.io.ByteArrayInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.concurrent.Callable;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.store.EtagChecksum;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
 
 /**
  * Tests of the S3A FileSystem which don't have a specific home and can share
@@ -40,6 +43,8 @@ import java.util.concurrent.Callable;
  */
 public class ITestS3AMiscOperations extends AbstractS3ATestBase {
 
+  private static final byte[] HELLO = "hello".getBytes(StandardCharsets.UTF_8);
+
   @Test
   public void testCreateNonRecursiveSuccess() throws IOException {
     Path shouldWork = path("nonrecursivenode");
@@ -58,7 +63,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
   @Test(expected = FileAlreadyExistsException.class)
   public void testCreateNonRecursiveParentIsFile() throws IOException {
     Path parent = path("/file.txt");
-    ContractTestUtils.touch(getFileSystem(), parent);
+    touch(getFileSystem(), parent);
     createNonRecursive(new Path(parent, "fail"));
   }
 
@@ -73,12 +78,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
         new ByteArrayInputStream("PUT".getBytes()),
         metadata);
     LambdaTestUtils.intercept(IllegalStateException.class,
-        new Callable<PutObjectResult>() {
-          @Override
-          public PutObjectResult call() throws Exception {
-            return fs.putObjectDirect(put);
-          }
-        });
+        () -> fs.putObjectDirect(put));
     assertPathDoesNotExist("put object was created", path);
   }
 
@@ -87,4 +87,103 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
         (short) 3, (short) 4096,
         null);
   }
+
+  /**
+   * Touch a path, return the full path.
+   * @param name relative name
+   * @return the path
+   * @throws IOException IO failure
+   */
+  Path touchFile(String name) throws IOException {
+    Path path = path(name);
+    touch(getFileSystem(), path);
+    return path;
+  }
+
+  /**
+   * Create a file with the data, return the path.
+   * @param name relative name
+   * @param data data to write
+   * @return the path
+   * @throws IOException IO failure
+   */
+  Path mkFile(String name, byte[] data) throws IOException {
+    final Path f = path(name);
+    createFile(getFileSystem(), f, true, data);
+    return f;
+  }
+
+  /**
+   * The assumption here is that 0-byte files uploaded in a single PUT
+   * always have the same checksum, including stores with encryption.
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testEmptyFileChecksums() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    Path file1 = touchFile("file1");
+    EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
+    LOG.info("Checksum for {}: {}", file1, checksum1);
+    assertNotNull("file 1 checksum", checksum1);
+    assertNotEquals("file 1 checksum", 0, checksum1.getLength());
+    assertEquals("checksums", checksum1,
+        fs.getFileChecksum(touchFile("file2"), 0));
+  }
+
+  /**
+   * Verify that different file contents have different
+   * checksums, and that they aren't the same as the empty file.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testNonEmptyFileChecksums() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    final Path file3 = mkFile("file3", HELLO);
+    final EtagChecksum checksum1 = fs.getFileChecksum(file3, 0);
+    assertNotNull("file 3 checksum", checksum1);
+    final Path file4 = touchFile("file4");
+    final EtagChecksum checksum2 = fs.getFileChecksum(file4, 0);
+    assertNotEquals("checksums", checksum1, checksum2);
+    // overwrite
+    createFile(fs, file4, true,
+        "hello, world".getBytes(StandardCharsets.UTF_8));
+    assertNotEquals(checksum2, fs.getFileChecksum(file4, 0));
+  }
+
+  /**
+   * Verify that on an unencrypted store, the checksum of two non-empty
+   * (single PUT) files is the same if the data is the same.
+   * This will fail if the bucket has S3 default encryption enabled.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testNonEmptyFileChecksumsUnencrypted() throws Throwable {
+    Assume.assumeTrue(encryptionAlgorithm().equals(S3AEncryptionMethods.NONE));
+    final S3AFileSystem fs = getFileSystem();
+    final EtagChecksum checksum1 =
+        fs.getFileChecksum(mkFile("file5", HELLO), 0);
+    assertNotNull("file 3 checksum", checksum1);
+    assertEquals("checksums", checksum1,
+        fs.getFileChecksum(mkFile("file6", HELLO), 0));
+  }
+
+  private S3AEncryptionMethods encryptionAlgorithm() {
+    return getFileSystem().getServerSideEncryptionAlgorithm();
+  }
+
+  @Test
+  public void testNegativeLength() throws Throwable {
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        () -> getFileSystem().getFileChecksum(mkFile("negative", HELLO), -1));
+  }
+
+  @Test
+  public void testLengthPastEOF() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    Path f = mkFile("file5", HELLO);
+    assertEquals(
+        fs.getFileChecksum(f, HELLO.length),
+        fs.getFileChecksum(f, HELLO.length * 2));
+  }
+
 }




[20/38] hadoop git commit: YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart (miklos.szegedi@cloudera.com via rkanter)

YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart (miklos.szegedi@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/382215c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/382215c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/382215c7

Branch: refs/heads/HDFS-7240
Commit: 382215c72b93d6a97d813f407cf6496a7c3f2a4a
Parents: 1ba491ff
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Dec 20 13:39:00 2017 -0800
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Dec 20 13:39:00 2017 -0800

----------------------------------------------------------------------
 .../applicationsmanager/TestAMRestart.java      | 131 +++++++++++--------
 1 file changed, 73 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/382215c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 3d523aa..4add186 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockMemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase;
 import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -63,14 +65,20 @@ import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
 
-public class TestAMRestart {
+/**
+ * Test AM restart functions.
+ */
+public class TestAMRestart extends ParameterizedSchedulerTestBase {
+
+  public TestAMRestart(SchedulerType type) throws IOException {
+    super(type);
+  }
 
   @Test(timeout = 30000)
   public void testAMRestartWithExistingContainers() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "name", "user",
@@ -266,15 +274,14 @@ public class TestAMRestart {
 
   @Test(timeout = 30000)
   public void testNMTokensRebindOnAMRestart() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
      // To prevent test from blacklisting nm1 for AM, we set the threshold to half
     // of 2 nodes which is 1
-    conf.setFloat(
+    getConf().setFloat(
         YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD,
         0.5f);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "myname", "myuser",
@@ -378,11 +385,11 @@ public class TestAMRestart {
   // should not be counted towards AM max retry count.
   @Test(timeout = 100000)
   public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -503,11 +510,11 @@ public class TestAMRestart {
 
   @Test(timeout = 100000)
   public void testMaxAttemptOneMeansOne() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -537,14 +544,15 @@ public class TestAMRestart {
   // re-launch the AM.
   @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     MockNM nm1 =
@@ -584,12 +592,19 @@ public class TestAMRestart {
     ApplicationStateData appState =
         memStore.getState().getApplicationState().get(app1.getApplicationId());
     Assert.assertEquals(2, appState.getAttemptCount());
-    // attempt stored has the preempted container exit status.
-    Assert.assertEquals(ContainerExitStatus.PREEMPTED,
-        appState.getAttempt(am2.getApplicationAttemptId())
-            .getAMContainerExitStatus());
+    if (getSchedulerType().equals(SchedulerType.FAIR)) {
+      // Fair scheduler: the stored attempt has the killed-by-RM exit status.
+      Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
+          appState.getAttempt(am2.getApplicationAttemptId())
+              .getAMContainerExitStatus());
+    } else {
+      // attempt stored has the preempted container exit status.
+      Assert.assertEquals(ContainerExitStatus.PREEMPTED,
+          appState.getAttempt(am2.getApplicationAttemptId())
+              .getAMContainerExitStatus());
+    }
     // Restart rm.
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     nm1.registerNode();
     rm2.start();
@@ -615,15 +630,16 @@ public class TestAMRestart {
   @Test(timeout = 50000)
   public void testRMRestartOrFailoverNotCountedForAMFailures()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     AbstractYarnScheduler scheduler =
@@ -651,7 +667,7 @@ public class TestAMRestart {
     RMAppAttempt attempt2 = app1.getCurrentAppAttempt();
 
     // Restart rm.
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
     ApplicationStateData appState =
         memStore.getState().getApplicationState().get(app1.getApplicationId());
@@ -688,14 +704,15 @@ public class TestAMRestart {
 
   @Test (timeout = 120000)
   public void testRMAppAttemptFailuresValidityInterval() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
 
     MockMemoryRMStateStore memStore =
@@ -765,7 +782,7 @@ public class TestAMRestart {
 
     // Restart rm.
     @SuppressWarnings("resource")
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
 
     MockMemoryRMStateStore memStore1 =
@@ -834,12 +851,11 @@ public class TestAMRestart {
     return false;
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 40000)
   public void testAMRestartNotLostContainerCompleteMsg() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "name", "user",
@@ -934,11 +950,10 @@ public class TestAMRestart {
   @Test (timeout = 20000)
   public void testAMRestartNotLostContainerAfterAttemptFailuresValidityInterval()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
             new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -1019,16 +1034,16 @@ public class TestAMRestart {
   @Test(timeout = 200000)
   public void testContainersFromPreviousAttemptsWithRMRestart()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
         YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-    conf.setLong(
+    getConf().setLong(
         YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf()
+        .set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     YarnScheduler scheduler = rm1.getResourceScheduler();
@@ -1071,7 +1086,7 @@ public class TestAMRestart {
         (AbstractYarnScheduler)scheduler, am1.getApplicationAttemptId());
 
     // restart rm
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     NMContainerStatus container2Status =
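
The conversion above hangs every test off ParameterizedSchedulerTestBase so
that each case runs once per scheduler. A hedged sketch of the JUnit 4
pattern involved (this base-class body is a simplification for
illustration, not the real class):

import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class SchedulerParamSketch {
  public enum SchedulerType { CAPACITY, FAIR }

  @Parameters
  public static Collection<Object[]> schedulers() {
    return Arrays.asList(new Object[][] {
        {SchedulerType.CAPACITY}, {SchedulerType.FAIR}});
  }

  private final SchedulerType type;

  public SchedulerParamSketch(SchedulerType type) {
    this.type = type;  // every @Test below runs once per scheduler
  }

  @Test
  public void exitStatusDependsOnScheduler() {
    // Mirrors the branch added above: FAIR reports
    // KILLED_BY_RESOURCEMANAGER where CAPACITY reports PREEMPTED.
  }
}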




[38/38] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c04492ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c04492ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c04492ac

Branch: refs/heads/HDFS-7240
Commit: c04492ac8b498d1964dc1cf880ca4b1be3f6d320
Parents: c5dcf3a 7fe6f83
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jan 2 10:08:58 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jan 2 10:08:58 2018 -0800

----------------------------------------------------------------------
 .../jdiff/Apache_Hadoop_Common_2.8.3.xml        | 38433 +++++++++++++++++
 .../hadoop/crypto/CryptoOutputStream.java       |    11 +-
 .../apache/hadoop/fs/store/EtagChecksum.java    |    90 +
 .../apache/hadoop/fs/store/package-info.java    |    28 +
 .../hadoop/security/UserGroupInformation.java   |     5 +-
 .../hadoop/crypto/CryptoStreamsTestBase.java    |     4 +-
 .../apache/hadoop/crypto/TestCryptoStreams.java |    47 +-
 .../fs/contract/AbstractContractOpenTest.java   |   258 -
 .../AbstractContractPathHandleTest.java         |   246 +
 .../hadoop/fs/store/TestEtagChecksum.java       |    85 +
 .../security/TestFixKerberosTicketOrder.java    |    77 +
 .../hadoop/hdfs/protocol/HdfsPathHandle.java    |     3 +
 .../jdiff/Apache_Hadoop_HDFS_2.8.3.xml          |   312 +
 .../sasl/SaslDataTransferServer.java            |     2 +-
 .../BlockPlacementPolicyDefault.java            |    74 +-
 .../blockmanagement/DatanodeDescriptor.java     |     2 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |     5 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |    71 +-
 .../hdfs/tools/federation/RouterAdmin.java      |     4 +-
 .../src/site/markdown/HdfsImageViewer.md        |     2 +-
 .../hdfs/TestHDFSContractPathHandle.java        |    55 +
 .../federation/router/TestRouterAdminCLI.java   |   121 +-
 .../snapshot/TestOpenFilesWithSnapshot.java     |     7 +-
 .../test/resources/testErasureCodingConf.xml    |     4 +-
 .../Apache_Hadoop_MapReduce_Common_2.8.3.xml    |   113 +
 .../Apache_Hadoop_MapReduce_Core_2.8.3.xml      | 27495 ++++++++++++
 .../Apache_Hadoop_MapReduce_JobClient_2.8.3.xml |    16 +
 hadoop-project/pom.xml                          |     2 +
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |    93 +-
 .../apache/hadoop/fs/s3a/S3AInputStream.java    |    28 +-
 .../hadoop/fs/s3a/S3AInstrumentation.java       |    13 +
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |     3 +
 .../src/site/markdown/tools/hadoop-aws/index.md |    13 +-
 .../apache/hadoop/fs/s3a/ITestS3AClosedFS.java  |    92 +
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   |   133 +-
 .../scale/ITestS3AInputStreamPerformance.java   |     6 +-
 .../fs/azure/AzureNativeFileSystemStore.java    |    16 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |    25 +-
 .../fs/azure/NativeAzureFileSystemHelper.java   |    18 +
 .../hadoop/fs/azure/NativeFileSystemStore.java  |     4 +
 .../fs/azure/SecureStorageInterfaceImpl.java    |     8 +-
 .../hadoop/fs/azure/StorageInterface.java       |     2 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |     8 +-
 .../azure/ITestNativeAzureFileSystemLive.java   |    72 +
 .../hadoop/fs/azure/MockStorageInterface.java   |     9 +-
 hadoop-tools/hadoop-fs2img/pom.xml              |    10 +
 .../dev-support/findbugs-exclude.xml            |     1 -
 .../jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml   |  2316 +
 .../jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml   |  2665 ++
 .../Apache_Hadoop_YARN_Server_Common_2.8.3.xml  |   829 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |    45 +-
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |    18 +-
 .../dev-support/findbugs-exclude.xml            |     8 +
 .../hadoop/yarn/service/ServiceMaster.java      |    37 +
 .../hadoop/yarn/service/ServiceScheduler.java   |    11 +
 .../yarn/service/api/records/Resource.java      |    48 +-
 .../api/records/ResourceInformation.java        |   119 +
 .../yarn/service/client/ServiceClient.java      |    26 +-
 .../yarn/service/component/Component.java       |    88 +-
 .../component/instance/ComponentInstance.java   |     6 +-
 .../yarn/service/utils/ServiceApiUtil.java      |    14 +-
 .../hadoop/yarn/service/utils/ServiceUtils.java |     4 +
 .../hadoop/yarn/service/MockServiceAM.java      |    31 +-
 .../hadoop/yarn/service/TestServiceAM.java      |    63 +-
 .../yarn/service/TestYarnNativeServices.java    |    34 +-
 .../yarn/service/conf/TestAppJsonResolve.java   |    17 +
 .../hadoop/yarn/service/conf/examples/app.json  |    11 +-
 .../hadoop/yarn/webapp/util/WebAppUtils.java    |    39 +-
 .../src/main/resources/yarn-default.xml         |    24 +
 .../launcher/RecoveredContainerLaunch.java      |     2 +-
 .../monitor/ContainersMonitorImpl.java          |    18 +-
 .../monitor/TestContainersMonitor.java          |     4 +-
 .../yarn/server/resourcemanager/MockAM.java     |     2 +-
 .../applicationsmanager/TestAMRestart.java      |   131 +-
 .../security/TestTimelineAuthFilterForV2.java   |     3 +-
 .../AbstractTimelineReaderHBaseTestBase.java    |     2 +-
 .../storage/flow/FlowRunCoprocessor.java        |     4 +-
 .../collector/NodeTimelineCollectorManager.java |    23 +-
 .../reader/TimelineReaderServer.java            |    25 +-
 .../reader/TestTimelineReaderServer.java        |     6 +-
 .../reader/TestTimelineReaderWebServices.java   |     2 +-
 .../src/site/markdown/TimelineServiceV2.md      |     6 +-
 .../markdown/yarn-service/YarnServiceAPI.md     |    15 +-
 .../webapp/app/components/queue-navigator.js    |    26 +-
 .../main/webapp/app/components/tree-selector.js |   301 +-
 .../yarn-queue-partition-capacity-labels.js     |    48 +
 .../src/main/webapp/app/constants.js            |     4 +-
 .../main/webapp/app/controllers/yarn-queues.js  |    36 +-
 .../src/main/webapp/app/initializers/loader.js  |     2 +-
 .../app/models/yarn-queue/capacity-queue.js     |    56 +-
 .../serializers/yarn-queue/capacity-queue.js    |     6 +
 .../src/main/webapp/app/styles/app.scss         |     1 +
 .../src/main/webapp/app/styles/yarn-queues.scss |    29 +
 .../templates/components/queue-navigator.hbs    |    20 +-
 .../yarn-queue-partition-capacity-labels.hbs    |    54 +
 .../components/yarn-queue/capacity-queue.hbs    |    34 +-
 .../main/webapp/app/templates/yarn-queues.hbs   |     3 +-
 ...yarn-queue-partition-capacity-labels-test.js |    43 +
 pom.xml                                         |     2 +-
 99 files changed, 74778 insertions(+), 709 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c04492ac/hadoop-project/pom.xml
----------------------------------------------------------------------




[13/38] hadoop git commit: YARN-7543. Add check for max cpu limit and missing file for YARN service. (Contributed by Jian He)

Posted by ae...@apache.org.
YARN-7543. Add check for max cpu limit and missing file for YARN service. (Contributed by Jian He)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/989c7510
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/989c7510
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/989c7510

Branch: refs/heads/HDFS-7240
Commit: 989c75109a619deeaee7461864e7cb3c289c9421
Parents: c0aeb66
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 16:45:04 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 16:45:04 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/service/utils/ServiceApiUtil.java     | 14 ++++++++++----
 .../hadoop/yarn/service/utils/ServiceUtils.java       |  4 ++++
 2 files changed, 14 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/989c7510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index d5ea45c..7f85c95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -330,13 +330,19 @@ public class ServiceApiUtil {
       org.apache.hadoop.yarn.api.records.Resource maxResource,
       Service service) throws YarnException {
     for (Component component : service.getComponents()) {
-      // only handle mem now.
       long mem = Long.parseLong(component.getResource().getMemory());
       if (mem > maxResource.getMemorySize()) {
         throw new YarnException(
-            "Component " + component.getName() + " memory size (" + mem
-                + ") is larger than configured max container memory size ("
-                + maxResource.getMemorySize() + ")");
+            "Component " + component.getName() + ": specified memory size ("
+                + mem + ") is larger than configured max container memory " +
+                "size (" + maxResource.getMemorySize() + ")");
+      }
+      int cpu = component.getResource().getCpus();
+      if (cpu > maxResource.getVirtualCores()) {
+        throw new YarnException(
+            "Component " + component.getName() + ": specified number of " +
+                "virtual core (" + cpu + ") is larger than configured max " +
+                "virtual core size (" + maxResource.getVirtualCores() + ")");
       }
     }
   }
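
The guard added here is a straightforward pre-submission check: reject a component whose requested memory or vcores exceed the cluster maximums. As a minimal, self-contained sketch of the same pattern (plain Java with illustrative names, not the actual YARN service API):

    // Sketch of the validation pattern added in ServiceApiUtil above.
    // All class and method names here are illustrative assumptions.
    public class ResourceCheckSketch {
      static void validate(String component, long mem, long maxMem,
                           int cores, int maxCores) {
        if (mem > maxMem) {
          throw new IllegalArgumentException("Component " + component
              + ": specified memory size (" + mem + ") is larger than "
              + "configured max container memory size (" + maxMem + ")");
        }
        if (cores > maxCores) {
          throw new IllegalArgumentException("Component " + component
              + ": specified number of virtual core (" + cores + ") is "
              + "larger than configured max virtual core size (" + maxCores + ")");
        }
      }

      public static void main(String[] args) {
        validate("compa", 512, 8192, 2, 4);  // passes silently
        validate("compb", 512, 8192, 16, 4); // throws, mirroring the new check
      }
    }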

http://git-wip-us.apache.org/repos/asf/hadoop/blob/989c7510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
index 173001b..915b836 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
@@ -411,6 +411,10 @@ public final class ServiceUtils {
         return;
       }
       for (File jarFile : listOfJars) {
+        if (!jarFile.exists()) {
+          log.debug("File does not exist, skipping: " + jarFile);
+          continue;
+        }
         LocalResource res = sliderFileSystem.submitFile(jarFile, tempPath, libDir, jarFile.getName());
         providerResources.put(libDir + "/" + jarFile.getName(), res);
       }




[26/38] hadoop git commit: HDFS-12951. Incorrect javadoc in SaslDataTransferServer.java#receive. Contributed by Mukul Kumar Singh.

Posted by ae...@apache.org.
HDFS-12951. Incorrect javadoc in SaslDataTransferServer.java#receive. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826507c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826507c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826507c4

Branch: refs/heads/HDFS-7240
Commit: 826507c41b7dd89ce5b53d2245d09c2443423670
Parents: b318bed
Author: Chen Liang <cl...@apache.org>
Authored: Thu Dec 21 11:20:30 2017 -0800
Committer: Chen Liang <cl...@apache.org>
Committed: Thu Dec 21 11:20:30 2017 -0800

----------------------------------------------------------------------
 .../hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826507c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
index e67d873..e3a72d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
@@ -97,7 +97,7 @@ public class SaslDataTransferServer {
    * @param peer connection peer
    * @param underlyingOut connection output stream
    * @param underlyingIn connection input stream
-   * @param int xferPort data transfer port of DataNode accepting connection
+   * @param xferPort data transfer port of DataNode accepting connection
    * @param datanodeId ID of DataNode accepting connection
    * @return new pair of streams, wrapped after SASL negotiation
    * @throws IOException for any error
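
For reference, a Javadoc @param tag takes only the parameter name; including the type, as the removed line did, makes the doclet look for a parameter literally named "int" and report it as unknown. The corrected form:

    /**
     * @param xferPort data transfer port of DataNode accepting connection
     */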




[36/38] hadoop git commit: HADOOP-15122. Lock down version of doxia-module-markdown plugin. Contributed by Marton Elek.

Posted by ae...@apache.org.
HADOOP-15122. Lock down version of doxia-module-markdown plugin. Contributed by Marton Elek.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bb765ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bb765ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bb765ee

Branch: refs/heads/HDFS-7240
Commit: 4bb765ee2e9fa16bb18c70d8f62ab885d883f414
Parents: 7467e8f
Author: Miklos Szegedi <sz...@apache.org>
Authored: Fri Dec 29 16:59:25 2017 -0800
Committer: Miklos Szegedi <sz...@apache.org>
Committed: Fri Dec 29 16:59:25 2017 -0800

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bb765ee/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f0b3c8e..d776678 100644
--- a/pom.xml
+++ b/pom.xml
@@ -187,7 +187,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
             <dependency>
               <groupId>org.apache.maven.doxia</groupId>
               <artifactId>doxia-module-markdown</artifactId>
-              <version>1.8-SNAPSHOT</version>
+              <version>1.8</version>
             </dependency>
           </dependencies>
         </plugin>




[15/38] hadoop git commit: YARN-7616. Map YARN application status to Service Status more accurately. (Contributed by Gour Saha)

Posted by ae...@apache.org.
YARN-7616. Map YARN application status to Service Status more accurately. (Contributed by Gour Saha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41b58101
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41b58101
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41b58101

Branch: refs/heads/HDFS-7240
Commit: 41b581012a83a17db785343362c718363e13e8f5
Parents: 94a2ac6
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 19:14:45 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 19:14:45 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/service/ServiceMaster.java      | 37 ++++++++++++++
 .../hadoop/yarn/service/ServiceScheduler.java   |  4 ++
 .../yarn/service/client/ServiceClient.java      | 26 ++++++----
 .../yarn/service/component/Component.java       | 53 ++++++++++++++++++--
 .../component/instance/ComponentInstance.java   |  6 +--
 .../yarn/service/TestYarnNativeServices.java    | 34 +++++++++++--
 6 files changed, 137 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 1283604..75cc9c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
@@ -237,6 +238,7 @@ public class ServiceMaster extends CompositeService {
       SliderFileSystem fs) throws IOException {
     context.service = ServiceApiUtil
         .loadServiceFrom(fs, new Path(serviceDefPath));
+    context.service.setState(ServiceState.ACCEPTED);
     LOG.info(context.service.toString());
   }
 
@@ -257,6 +259,41 @@ public class ServiceMaster extends CompositeService {
     super.serviceStop();
   }
 
+  // This method should be called whenever there is an increment or decrement
+  // of a READY state component of a service
+  public static synchronized void checkAndUpdateServiceState(
+      ServiceScheduler scheduler, boolean isIncrement) {
+    ServiceState curState = scheduler.getApp().getState();
+    if (!isIncrement) {
+      // set it to STARTED every time a component moves out of STABLE state
+      scheduler.getApp().setState(ServiceState.STARTED);
+    } else {
+      // otherwise check the state of all components
+      boolean isStable = true;
+      for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
+          .getApp().getComponents()) {
+        if (comp.getState() !=
+            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
+          isStable = false;
+          break;
+        }
+      }
+      if (isStable) {
+        scheduler.getApp().setState(ServiceState.STABLE);
+      } else {
+        // mark new state as started only if current state is stable, otherwise
+        // leave it as is
+        if (curState == ServiceState.STABLE) {
+          scheduler.getApp().setState(ServiceState.STARTED);
+        }
+      }
+    }
+    if (curState != scheduler.getApp().getState()) {
+      LOG.info("Service state changed from {} -> {}", curState,
+          scheduler.getApp().getState());
+    }
+  }
+
   private void printSystemEnv() {
     for (Map.Entry<String, String> envs : System.getenv().entrySet()) {
       LOG.info("{} = {}", envs.getKey(), envs.getValue());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 2697050..45cdd28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
 import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
@@ -284,6 +285,9 @@ public class ServiceScheduler extends CompositeService {
     }
     registerServiceInstance(context.attemptId, app);
 
+    // Since AM has been started and registered, the service is in STARTED state
+    app.setState(ServiceState.STARTED);
+
     // recover components based on containers sent from RM
     recoverComponents(response);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 81c56d2..d1ccc4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -268,7 +268,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       long ret = orig - Long.parseLong(newNumber.substring(1));
       if (ret < 0) {
         LOG.warn(MessageFormat.format(
-            "[COMPONENT {}]: component count goes to negative ({}{} = {}), reset it to 0.",
+            "[COMPONENT {0}]: component count goes to negative ({1}{2} = {3}),"
+                + " ignore and reset it to 0.",
             component.getName(), orig, newNumber, ret));
         ret = 0;
       }
@@ -878,18 +879,23 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     return newTimeout;
   }
 
-  public ServiceState convertState(FinalApplicationStatus status) {
-    switch (status) {
-    case UNDEFINED:
+  public ServiceState convertState(YarnApplicationState state) {
+    switch (state) {
+    case NEW:
+    case NEW_SAVING:
+    case SUBMITTED:
+    case ACCEPTED:
       return ServiceState.ACCEPTED;
-    case FAILED:
+    case RUNNING:
+      return ServiceState.STARTED;
+    case FINISHED:
     case KILLED:
-      return ServiceState.FAILED;
-    case ENDED:
-    case SUCCEEDED:
       return ServiceState.STOPPED;
+    case FAILED:
+      return ServiceState.FAILED;
+    default:
+      return ServiceState.ACCEPTED;
     }
-    return ServiceState.ACCEPTED;
   }
 
   public String getStatusString(String appId)
@@ -917,7 +923,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId);
     Service appSpec = new Service();
     appSpec.setName(serviceName);
-    appSpec.setState(convertState(appReport.getFinalApplicationStatus()));
+    appSpec.setState(convertState(appReport.getYarnApplicationState()));
     ApplicationTimeout lifetime =
         appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME);
     if (lifetime != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 9c5cbae..a84c1b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -31,7 +31,9 @@ import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
 import org.apache.hadoop.yarn.service.ContainerFailureTracker;
 import org.apache.hadoop.yarn.service.ServiceContext;
 import org.apache.hadoop.yarn.service.ServiceScheduler;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
+import org.apache.hadoop.yarn.service.ServiceMaster;
 import org.apache.hadoop.yarn.service.ServiceMetrics;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
@@ -209,6 +211,7 @@ public class Component implements EventHandler<ComponentEvent> {
         component.createNumCompInstances(delta);
         component.componentSpec.setState(
             org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+        component.getScheduler().getApp().setState(ServiceState.STARTED);
         return FLEXING;
       } else if (delta < 0){
         delta = 0 - delta;
@@ -229,14 +232,11 @@ public class Component implements EventHandler<ComponentEvent> {
           component.instanceIdCounter.decrementAndGet();
           instance.destroy();
         }
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+        checkAndUpdateComponentState(component, false);
         return STABLE;
       } else {
         LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " +
             event.getDesired() + " instances, ignoring");
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
         return STABLE;
       }
     }
@@ -289,7 +289,7 @@ public class Component implements EventHandler<ComponentEvent> {
 
   private static ComponentState checkIfStable(Component component) {
     // if desired == running
-    if (component.componentMetrics.containersRunning.value() == component
+    if (component.componentMetrics.containersReady.value() == component
         .getComponentSpec().getNumberOfContainers()) {
       component.componentSpec.setState(
           org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
@@ -301,6 +301,46 @@ public class Component implements EventHandler<ComponentEvent> {
     }
   }
 
+  // This method should be called whenever there is an increment or decrement
+  // of a READY state container of a component
+  public static synchronized void checkAndUpdateComponentState(
+      Component component, boolean isIncrement) {
+    org.apache.hadoop.yarn.service.api.records.ComponentState curState =
+        component.componentSpec.getState();
+    if (isIncrement) {
+      // check if all containers are in READY state
+      if (component.componentMetrics.containersReady
+          .value() == component.componentMetrics.containersDesired.value()) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+        if (curState != component.componentSpec.getState()) {
+          LOG.info("[COMPONENT {}] state changed from {} -> {}",
+              component.componentSpec.getName(), curState,
+              component.componentSpec.getState());
+        }
+        // component state change will trigger re-check of service state
+        ServiceMaster.checkAndUpdateServiceState(component.scheduler,
+            isIncrement);
+      }
+    } else {
+      // container moving out of READY state could be because of FLEX down so
+      // still need to verify the count before changing the component state
+      if (component.componentMetrics.containersReady
+          .value() < component.componentMetrics.containersDesired.value()) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+        if (curState != component.componentSpec.getState()) {
+          LOG.info("[COMPONENT {}] state changed from {} -> {}",
+              component.componentSpec.getName(), curState,
+              component.componentSpec.getState());
+        }
+        // component state change will trigger re-check of service state
+        ServiceMaster.checkAndUpdateServiceState(component.scheduler,
+            isIncrement);
+      }
+    }
+  }
+
   private static class ContainerCompletedTransition extends BaseTransition {
     @Override
     public void transition(Component component, ComponentEvent event) {
@@ -310,6 +350,7 @@ public class Component implements EventHandler<ComponentEvent> {
               STOP).setStatus(event.getStatus()));
       component.componentSpec.setState(
           org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+      component.getScheduler().getApp().setState(ServiceState.STARTED);
     }
   }
 
@@ -472,11 +513,13 @@ public class Component implements EventHandler<ComponentEvent> {
   public void incContainersReady() {
     componentMetrics.containersReady.incr();
     scheduler.getServiceMetrics().containersReady.incr();
+    checkAndUpdateComponentState(this, true);
   }
 
   public void decContainersReady() {
     componentMetrics.containersReady.decr();
     scheduler.getServiceMetrics().containersReady.decr();
+    checkAndUpdateComponentState(this, false);
   }
 
   public int getNumReadyInstances() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 31fa5c7..0e3e11b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -147,7 +147,6 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
               new ContainerStatusRetriever(compInstance.scheduler,
                   event.getContainerId(), compInstance), 0, 1,
               TimeUnit.SECONDS);
-      compInstance.component.incRunningContainers();
       long containerStartTime = System.currentTimeMillis();
       try {
         ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
@@ -171,6 +170,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
       compInstance.containerSpec = container;
       compInstance.getCompSpec().addContainer(container);
       compInstance.containerStartedTime = containerStartTime;
+      compInstance.component.incRunningContainers();
 
       if (compInstance.timelineServiceEnabled) {
         compInstance.serviceTimelinePublisher
@@ -183,8 +183,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override
     public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
-      compInstance.component.incContainersReady();
       compInstance.containerSpec.setState(ContainerState.READY);
+      compInstance.component.incContainersReady();
       if (compInstance.timelineServiceEnabled) {
         compInstance.serviceTimelinePublisher
             .componentInstanceBecomeReady(compInstance.containerSpec);
@@ -196,8 +196,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override
     public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
-      compInstance.component.decContainersReady();
       compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY);
+      compInstance.component.decContainersReady();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 1c517d9..debab8b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
@@ -90,25 +91,25 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     // check app.json is persisted.
     Assert.assertTrue(
         getFS().exists(new Path(appDir, exampleApp.getName() + ".json")));
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
 
     // Flex two components, each from 2 container to 3 containers.
     flexComponents(client, exampleApp, 3L);
     // wait for flex to be completed, increase from 2 to 3 containers.
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     // check all instances name for each component are in sequential order.
     checkCompInstancesInOrder(client, exampleApp);
 
     // flex down to 1
     flexComponents(client, exampleApp, 1L);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     checkCompInstancesInOrder(client, exampleApp);
 
     // check component dir and registry are cleaned up.
 
     // flex up again to 2
     flexComponents(client, exampleApp, 2L);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     checkCompInstancesInOrder(client, exampleApp);
 
     // stop the service
@@ -145,7 +146,7 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     exampleApp.addComponent(compb);
 
     client.actionCreate(exampleApp);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
 
     // check that containers for compa are launched before containers for compb
     checkContainerLaunchDependencies(client, exampleApp, "compa", "compb");
@@ -372,6 +373,29 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     return allContainers;
   }
 
+  /**
+   * Wait until service state becomes stable. A service is stable when all
+   * requested containers of all components are running and in ready state.
+   *
+   * @param client
+   * @param exampleApp
+   * @throws TimeoutException
+   * @throws InterruptedException
+   */
+  private void waitForServiceToBeStable(ServiceClient client,
+      Service exampleApp) throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> {
+      try {
+        Service retrievedApp = client.getStatus(exampleApp.getName());
+        System.out.println(retrievedApp);
+        return retrievedApp.getState() == ServiceState.STABLE;
+      } catch (Exception e) {
+        e.printStackTrace();
+        return false;
+      }
+    }, 2000, 200000);
+  }
+
   private ServiceClient createClient() throws Exception {
     ServiceClient client = new ServiceClient() {
       @Override protected Path addJarResource(String appName,
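
The core of this patch is a readiness roll-up: a component becomes STABLE when its READY container count matches the desired count, and the service becomes STABLE only when every component is STABLE, otherwise it is reported as STARTED. A minimal sketch of that roll-up; the enum values match the patch, but the class and method names are illustrative, not the YARN service classes:

    import java.util.Arrays;
    import java.util.List;

    public class StabilitySketch {
      enum State { FLEXING, STABLE, STARTED }

      // Component rule: STABLE only when all desired containers are READY.
      static State componentState(int containersReady, int containersDesired) {
        return containersReady == containersDesired ? State.STABLE : State.FLEXING;
      }

      // Service rule: STABLE only when every component is STABLE.
      static State serviceState(List<State> components) {
        return components.stream().allMatch(s -> s == State.STABLE)
            ? State.STABLE : State.STARTED;
      }

      public static void main(String[] args) {
        System.out.println(serviceState(Arrays.asList(
            componentState(2, 2), componentState(1, 3)))); // STARTED
        System.out.println(serviceState(Arrays.asList(
            componentState(2, 2), componentState(3, 3)))); // STABLE
      }
    }

This is also why the patch reorders the inc/dec calls in ComponentInstance: the container's state is set before the counters trigger the roll-up, so the check sees a consistent view.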




[17/38] hadoop git commit: HDFS-12932. Fix confusing LOG message for block replication. Contributed by Chao Sun.

Posted by ae...@apache.org.
HDFS-12932. Fix confusing LOG message for block replication. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a78db991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a78db991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a78db991

Branch: refs/heads/HDFS-7240
Commit: a78db9919065d06ced8122229530f44cc7369857
Parents: d62932c
Author: Wei Yan <we...@apache.org>
Authored: Wed Dec 20 08:55:46 2017 -0800
Committer: Wei Yan <we...@apache.org>
Committed: Wed Dec 20 08:55:46 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java     | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a78db991/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0dfaa8e..201605f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -414,9 +414,12 @@ public class FSDirAttrOp {
       if (oldBR > targetReplication) {
         FSDirectory.LOG.info("Decreasing replication from {} to {} for {}",
                              oldBR, targetReplication, iip.getPath());
-      } else {
+      } else if (oldBR < targetReplication) {
         FSDirectory.LOG.info("Increasing replication from {} to {} for {}",
                              oldBR, targetReplication, iip.getPath());
+      } else {
+        FSDirectory.LOG.info("Replication remains unchanged at {} for {}",
+                             oldBR, iip.getPath());
       }
     }
     return file.getBlocks();
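
The behavioral change is easiest to see in isolation: the old two-way branch logged "Increasing" even when the replication factor was unchanged. A tiny sketch of the corrected three-way branch (class and method names are illustrative):

    public class ReplicationLogSketch {
      static String describe(short oldBR, short target, String path) {
        if (oldBR > target) {
          return "Decreasing replication from " + oldBR + " to " + target + " for " + path;
        } else if (oldBR < target) {
          return "Increasing replication from " + oldBR + " to " + target + " for " + path;
        }
        return "Replication remains unchanged at " + oldBR + " for " + path;
      }

      public static void main(String[] args) {
        System.out.println(describe((short) 3, (short) 3, "/data/file")); // unchanged case
      }
    }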




[07/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
new file mode 100644
index 0000000..331dd1e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:19:32 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec
 /2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jet
 tison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4
 .1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.8.3.jar:/maven/com/squareup/okhttp/okhttp/2.4.0/okhttp-2.4.0.jar:/maven/com/squareup/okio/okio/1.4.0/okio-1.4.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-
 cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/apache/directory/api/api-i18n/1.0.0-M20/api-i18n-1.0.0-M20.jar:/maven/org/apache/directory/api/api-ldap-model/1.0.0-M20/api-ldap-model-1.0.0-M20.jar:/maven/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/maven/net/sf/ehcache/ehcache-core/2.4.4/ehcache-core-2.4.4.jar:/maven/antlr/antlr/2.7.7/antlr-2.7.7.jar:/maven/org/apache/directory/api/api-asn1-ber/1.0.0-M20/api-asn1-ber-1.0.0-M20.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/
 codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.8.3 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
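
The INodeAttributeProvider entries above describe the NameNode's pluggable hook for substituting inode attributes during permission checking. A minimal sketch of a concrete provider, implementing only the abstract methods listed in the jdiff output (the class name and pass-through behavior are illustrative, not part of this commit):

    import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
    import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

    public class PassThroughAttributeProvider extends INodeAttributeProvider {
      @Override
      public void start() {
        // Called at NameNode startup; load any external policy state here.
      }

      @Override
      public void stop() {
        // Called at NameNode shutdown; release resources here.
      }

      @Override
      public INodeAttributes getAttributes(String[] pathElements,
          INodeAttributes inode) {
        // Return the stored attributes unchanged; a real provider could
        // return attributes fetched from an external authorization system.
        return inode;
      }
    }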

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
new file mode 100644
index 0000000..b3d52bf
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:51:53 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.8.3.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-yarn-server-common-2.8.3.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Common 2.8.3 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+<package name="org.apache.hadoop.mapreduce">
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.api.protocolrecords">
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <interface name="CancelDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to the {@code ResourceManager} to cancel a
+ delegation token.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <interface name="CancelDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[The response from the {@code ResourceManager} to a cancelDelegationToken
+ request.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <interface name="GetDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getRenewer" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRenewer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <interface name="RenewDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to renew a delegation token from
+ the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+  <interface name="RenewDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNextExpirationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNextExpirationTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expTime" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[The response to a renewDelegationToken call to the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.security">
+</package>
+<package name="org.apache.hadoop.yarn.proto">
+</package>
+
+</api>
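
The protocol records above are plain interfaces; concrete instances normally come from YARN's record factory. A short sketch of building the token requests, assuming the standard org.apache.hadoop.yarn.util.Records helper resolves these records (the renewer value is illustrative):

    import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
    import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
    import org.apache.hadoop.yarn.api.records.Token;
    import org.apache.hadoop.yarn.util.Records;

    public final class TokenRequests {
      // Request a delegation token that the given principal may renew.
      static GetDelegationTokenRequest buildGetRequest(String renewer) {
        GetDelegationTokenRequest req =
            Records.newRecord(GetDelegationTokenRequest.class);
        req.setRenewer(renewer);
        return req;
      }

      // Wrap a previously obtained token in a renewal request.
      static RenewDelegationTokenRequest buildRenewRequest(Token token) {
        RenewDelegationTokenRequest req =
            Records.newRecord(RenewDelegationTokenRequest.class);
        req.setDelegationToken(token);
        return req;
      }
    }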




[25/38] hadoop git commit: HDFS-12938. TestErasureCodingCLI testAll failing consistently. (Contributed by Ajay Kumar)

Posted by ae...@apache.org.
HDFS-12938. TestErasureCodingCLI testAll failing consistently. (Contributed by Ajay Kumar)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b318bed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b318bed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b318bed0

Branch: refs/heads/HDFS-7240
Commit: b318bed01affa150d70661f263efff9a5c9422f6
Parents: c8ff0cc
Author: Lei Xu <le...@apache.org>
Authored: Thu Dec 21 10:28:24 2017 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Thu Dec 21 10:28:24 2017 -0800

----------------------------------------------------------------------
 .../hadoop-hdfs/src/test/resources/testErasureCodingConf.xml     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318bed0/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index bd451eb..fc0c060 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -311,7 +311,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -353,7 +353,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: unsetting erasure coding policy on an non-empty directory will not automatically convert existing data to replicated data</expected-output>
+          <expected-output>Warning: unsetting erasure coding policy on a non-empty directory will not automatically convert existing files to replicated data</expected-output>
         </comparator>
       </comparators>
     </test>
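
The corrected warning text reflects how erasure coding policies actually behave: applying or removing a policy on a directory changes the layout of files created afterwards, never of files already present. A hedged sketch of the Java calls behind the hdfs ec shell commands these tests exercise, assuming an HDFS default filesystem and the built-in RS-6-3-1024k policy:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class EcPolicyDemo {
      static void applyAndRemovePolicy(Configuration conf) throws IOException {
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path dir = new Path("/ecdir");
        dfs.mkdirs(dir);
        // Existing files in dir keep their current layout; only files
        // written after this call use the policy, hence the warning.
        dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
        // Likewise, unsetting does not rewrite existing EC files.
        dfs.unsetErasureCodingPolicy(dir);
      }
    }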




[31/38] hadoop git commit: YARN-7542. Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED. (Sampada Dehankar via asuresh)

Posted by ae...@apache.org.
YARN-7542. Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED. (Sampada Dehankar via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a55884c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a55884c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a55884c6

Branch: refs/heads/HDFS-7240
Commit: a55884c68eb175f1c9f61771386c086bf1ee65a9
Parents: 5bf7e59
Author: Arun Suresh <as...@apache.org>
Authored: Thu Dec 28 22:20:42 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Thu Dec 28 22:20:42 2017 -0800

----------------------------------------------------------------------
 .../containermanager/launcher/RecoveredContainerLaunch.java        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55884c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
index a3ccf00..17ddd77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
@@ -72,7 +72,7 @@ public class RecoveredContainerLaunch extends ContainerLaunch {
     String containerIdStr = containerId.toString();
 
     dispatcher.getEventHandler().handle(new ContainerEvent(containerId,
-        ContainerEventType.RECOVER_PAUSED_CONTAINER));
+        ContainerEventType.CONTAINER_LAUNCHED));
 
     boolean notInterrupted = true;
     try {
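
The one-line change encodes a state distinction: RecoveredContainerLaunch is the relaunch path for containers that were running at shutdown, so the recovery event it dispatches must be CONTAINER_LAUNCHED; containers persisted as paused are recovered through a separate launcher. A hypothetical helper making that intent explicit (the class, method, and flag names are illustrative only):

    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;

    final class RecoveryEvents {
      // Pick the recovery event from the persisted container state.
      static ContainerEventType recoveryEvent(boolean wasPausedAtShutdown) {
        return wasPausedAtShutdown
            ? ContainerEventType.RECOVER_PAUSED_CONTAINER
            : ContainerEventType.CONTAINER_LAUNCHED;
      }
    }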




[12/38] hadoop git commit: YARN-7662. [ATSv2] Define new set of configurations for reader and collectors to bind (Rohith Sharma K S via Varun Saxena)

Posted by ae...@apache.org.
YARN-7662. [ATSv2] Define new set of configurations for reader and collectors to bind (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0aeb666
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0aeb666
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0aeb666

Branch: refs/heads/HDFS-7240
Commit: c0aeb666a4d43aac196569d9ec6768d62139d2b9
Parents: fe5b057
Author: Varun Saxena <va...@apache.org>
Authored: Tue Dec 19 22:29:24 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Tue Dec 19 22:29:24 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 45 +++++++++++++++++++-
 .../hadoop/yarn/webapp/util/WebAppUtils.java    | 39 ++++++++++++++---
 .../src/main/resources/yarn-default.xml         | 24 +++++++++++
 .../security/TestTimelineAuthFilterForV2.java   |  3 +-
 .../AbstractTimelineReaderHBaseTestBase.java    |  2 +-
 .../collector/NodeTimelineCollectorManager.java | 23 +++++++---
 .../reader/TimelineReaderServer.java            | 25 ++++++++---
 .../reader/TestTimelineReaderServer.java        |  6 +--
 .../reader/TestTimelineReaderWebServices.java   |  2 +-
 .../src/site/markdown/TimelineServiceV2.md      |  6 +--
 10 files changed, 145 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e57f988..1b6bd0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2389,6 +2389,9 @@ public class YarnConfiguration extends Configuration {
   /**
    * Settings for timeline service v2.0.
    */
+  public static final String TIMELINE_SERVICE_READER_PREFIX =
+      TIMELINE_SERVICE_PREFIX + "reader.";
+
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
       TIMELINE_SERVICE_PREFIX + "writer.class";
 
@@ -2397,7 +2400,7 @@ public class YarnConfiguration extends Configuration {
           + ".storage.HBaseTimelineWriterImpl";
 
   public static final String TIMELINE_SERVICE_READER_CLASS =
-      TIMELINE_SERVICE_PREFIX + "reader.class";
+      TIMELINE_SERVICE_READER_PREFIX + "class";
 
   public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
       "org.apache.hadoop.yarn.server.timelineservice.storage" +
@@ -3422,6 +3425,46 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_XFS_OPTIONS =
       TIMELINE_XFS_PREFIX + "xframe-options";
 
+  /**
+   * Settings for timeline reader.
+   */
+  public static final String TIMELINE_SERVICE_READER_BIND_HOST =
+      TIMELINE_SERVICE_READER_PREFIX + "bind-host";
+
+  public static final String TIMELINE_SERVICE_READER_WEBAPP_ADDRESS =
+      TIMELINE_SERVICE_READER_PREFIX + "webapp.address";
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS;
+
+  public static final String TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
+      TIMELINE_SERVICE_READER_PREFIX + "webapp.https.address";
+  public static final String
+      DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
+
+  /**
+   * Marked collector properties as Private since it runs as an auxiliary service.
+   */
+  public static final String TIMELINE_SERVICE_COLLECTOR_PREFIX =
+      TIMELINE_SERVICE_PREFIX + "collector.";
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_BIND_HOST =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "bind-host";
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "webapp.address";
+  public static final String DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS;
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "webapp.https.address";
+  public static final String
+      DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
+
   public YarnConfiguration() {
     super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 446f0a1..e62bf10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -314,16 +314,41 @@ public class WebAppUtils {
   }
 
   public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
-    return getTimelineReaderWebAppURL(conf);
-  }
-
-  public static String getTimelineReaderWebAppURL(Configuration conf) {
     if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS);
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS);
     } else {
       return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS);
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS);
+    }
+  }
+
+  public static String getTimelineReaderWebAppURLWithoutScheme(
+      Configuration conf) {
+    if (YarnConfiguration.useHttps(conf)) {
+      return conf
+          .get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS,
+              YarnConfiguration.
+                  DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS);
+    } else {
+      return conf.get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
+          YarnConfiguration.
+              DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS);
+    }
+  }
+
+  public static String getTimelineCollectorWebAppURLWithoutScheme(
+      Configuration conf) {
+    if (YarnConfiguration.useHttps(conf)) {
+      return conf.get(
+          YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS,
+          YarnConfiguration.
+              DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS);
+    } else {
+      return conf
+          .get(YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS,
+              YarnConfiguration.
+                  DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS);
     }
   }
 
@@ -342,7 +367,7 @@ public class WebAppUtils {
       return schemePrefix + url;
     }
   }
-  
+
   public static String getRunningLogURL(
       String nodeHttpAddress, String containerId, String user) {
     if (nodeHttpAddress == null || nodeHttpAddress.isEmpty() ||

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 192f62e..d450eca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3599,4 +3599,28 @@
     <value>0,1</value>
   </property>
 
+  <property>
+    <description>The http address of the timeline reader web application.</description>
+    <name>yarn.timeline-service.reader.webapp.address</name>
+    <value>${yarn.timeline-service.webapp.address}</value>
+  </property>
+
+  <property>
+    <description>The https address of the timeline reader web application.</description>
+    <name>yarn.timeline-service.reader.webapp.https.address</name>
+    <value>${yarn.timeline-service.webapp.https.address}</value>
+  </property>
+
+  <property>
+    <description>
+      The actual address timeline reader will bind to. If this optional address is
+      set, the reader server will bind to this address and the port specified in
+      yarn.timeline-service.reader.webapp.address.
+      This is most useful for making the service listen to all interfaces by setting to
+      0.0.0.0.
+    </description>
+    <name>yarn.timeline-service.reader.bind-host</name>
+    <value></value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 75f17fb..bb511d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -162,7 +162,8 @@ public class TestTimelineAuthFilterForV2 {
       conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
       conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
           FileSystemTimelineWriterImpl.class, TimelineWriter.class);
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, "localhost");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST,
+          "localhost");
       conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
           TEST_ROOT_DIR.getAbsolutePath());
       conf.set("hadoop.proxyuser.HTTP.hosts", "*");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
index 3519c3f..471fb6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -79,7 +79,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
       Configuration config = util.getConfiguration();
       config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
       config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
           "localhost:0");
       config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
       config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 68a68f0..696f4a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -145,10 +145,9 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private void doSecureLogin() throws IOException {
     Configuration conf = getConfig();
-    InetSocketAddress addr = NetUtils.createSocketAddr(conf.getTrimmed(
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST), 0,
-                YarnConfiguration.TIMELINE_SERVICE_BIND_HOST);
+    String webAppURLWithoutScheme =
+        WebAppUtils.getTimelineCollectorWebAppURLWithoutScheme(conf);
+    InetSocketAddress addr = NetUtils.createSocketAddr(webAppURLWithoutScheme);
     SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
         YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, addr.getHostName());
   }
@@ -277,8 +276,20 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
         initializers, defaultInitializers, tokenMgrService);
     TimelineServerUtils.setTimelineFilters(
         conf, initializers, defaultInitializers);
-    String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
+
+    String bindAddress = null;
+    String host =
+        conf.getTrimmed(YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_BIND_HOST);
+    if (host == null || host.isEmpty()) {
+      // if collector bind-host is not set, fall back to
+      // timeline-service.bind-host to maintain compatibility
+      bindAddress =
+          conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+              YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
+    } else {
+      bindAddress = host + ":0";
+    }
+
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()
           .setName("timeline")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 5c049ea..3cc24ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
@@ -63,6 +64,8 @@ public class TimelineReaderServer extends CompositeService {
 
   private HttpServer2 readerWebServer;
   private TimelineReaderManager timelineReaderManager;
+  private String webAppURLWithoutScheme;
+
 
   public TimelineReaderServer() {
     super(TimelineReaderServer.class.getName());
@@ -73,10 +76,10 @@ public class TimelineReaderServer extends CompositeService {
     if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
       throw new YarnException("timeline service v.2 is not enabled");
     }
-    InetSocketAddress bindAddr = conf.getSocketAddr(
-        YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
-            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
-                YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+    webAppURLWithoutScheme =
+        WebAppUtils.getTimelineReaderWebAppURLWithoutScheme(conf);
+    InetSocketAddress bindAddr =
+        NetUtils.createSocketAddr(webAppURLWithoutScheme);
     // Login from keytab if security is enabled.
     try {
       SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
@@ -170,9 +173,17 @@ public class TimelineReaderServer extends CompositeService {
   private void startTimelineReaderWebApp() {
     Configuration conf = getConfig();
     addFilters(conf);
-    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        WebAppUtils.getTimelineReaderWebAppURL(conf));
+
+    String hostProperty = YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST;
+    String host = conf.getTrimmed(hostProperty);
+    if (host == null || host.isEmpty()) {
+      // if reader bind-host is not set, fall back to timeline-service.bind-host
+      // to maintain compatibility
+      hostProperty = YarnConfiguration.TIMELINE_SERVICE_BIND_HOST;
+    }
+    String bindAddress = WebAppUtils
+        .getWebAppBindURL(conf, hostProperty, webAppURLWithoutScheme);
+
     LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
index bb96f37..6fc46cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
@@ -37,7 +37,7 @@ public class TestTimelineReaderServer {
     Configuration config = new YarnConfiguration();
     config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     config.setClass(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         FileSystemTimelineReaderImpl.class, TimelineReader.class);
@@ -61,7 +61,7 @@ public class TestTimelineReaderServer {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         Object.class.getName());
@@ -75,7 +75,7 @@ public class TestTimelineReaderServer {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         nonexistentTimelineReaderClass);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
index f760834..03939ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
@@ -84,7 +84,7 @@ public class TestTimelineReaderWebServices {
       Configuration config = new YarnConfiguration();
       config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
       config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
           "localhost:0");
       config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
       config.setClass(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 7c51ce0..022f76d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -133,9 +133,9 @@ New configuration parameters that are introduced with v.2 are marked bold.
 | Configuration Property | Description |
 |:---- |:---- |
 | `yarn.timeline-service.hostname` | The hostname of the Timeline service web application. Defaults to `0.0.0.0` |
-| `yarn.timeline-service.address` | Address for the Timeline server to start the RPC server. Defaults to `${yarn.timeline-service.hostname}:10200`. |
-| `yarn.timeline-service.webapp.address` | The http address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8188`. |
-| `yarn.timeline-service.webapp.https.address` | The https address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8190`. |
+| `yarn.timeline-service.reader.webapp.address` | The http address of the Timeline Reader web application. Defaults to `${yarn.timeline-service.hostname}:8188`. |
+| `yarn.timeline-service.reader.webapp.https.address` | The https address of the Timeline Reader web application. Defaults to `${yarn.timeline-service.hostname}:8190`. |
+| `yarn.timeline-service.reader.bind-host` | The actual address the timeline reader will bind to. If this optional address is set, reader server will bind to this address and the port specified in yarn.timeline-service.reader.webapp.address. This is most useful for making the service listen on all interfaces by setting to 0.0.0.0. |
 | **`yarn.timeline-service.hbase.configuration.file`** | Optional URL to an hbase-site.xml configuration file to be used to connect to the timeline-service hbase cluster. If empty or not specified, then the HBase configuration will be loaded from the classpath. When specified the values in the specified configuration file will override those from the ones that are present on the classpath. Defaults to `null`. |
 | **`yarn.timeline-service.writer.flush-interval-seconds`** | The setting that controls how often the timeline collector flushes the timeline writer. Defaults to `60`. |
 | **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period till which the application collector will be alive in NM, after the  application master container finishes. Defaults to `1000` (1 second). |
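
Taken together, the new keys let the reader advertise one address while binding another. A small sketch exercising the constants and helper introduced in this change (hostnames are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

    public final class ReaderAddressDemo {
      static String readerWebAppAddress() {
        Configuration conf = new YarnConfiguration();
        // Advertised address for clients of the reader REST API.
        conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
            "timelinehost:8188");
        // Bind to all interfaces while keeping the advertised port.
        conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST, "0.0.0.0");
        // Returns "timelinehost:8188" here, since HTTPS is not enabled.
        return WebAppUtils.getTimelineReaderWebAppURLWithoutScheme(conf);
      }
    }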




[10/38] hadoop git commit: HDFS-12937. RBF: Add more unit tests for router admin commands. Contributed by Yiqun Lin.

Posted by ae...@apache.org.
HDFS-12937. RBF: Add more unit tests for router admin commands. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e040c97b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e040c97b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e040c97b

Branch: refs/heads/HDFS-7240
Commit: e040c97b7743469f363eeae52c8abcf4fe7c65d5
Parents: a7f8caf
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Dec 19 15:31:34 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Dec 19 15:31:34 2017 +0800

----------------------------------------------------------------------
 .../hdfs/tools/federation/RouterAdmin.java      |   4 +-
 .../federation/router/TestRouterAdminCLI.java   | 121 ++++++++++++++++++-
 2 files changed, 120 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e040c97b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index a91a602..fd961f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -132,11 +132,11 @@ public class RouterAdmin extends Configured implements Tool {
     try {
       if ("-add".equals(cmd)) {
         if (addMount(argv, i)) {
-          System.err.println("Successfully added mount point " + argv[i]);
+          System.out.println("Successfully added mount point " + argv[i]);
         }
       } else if ("-rm".equals(cmd)) {
         if (removeMount(argv[i])) {
-          System.err.println("Successfully removed mount point " + argv[i]);
+          System.out.println("Successfully removed mount point " + argv[i]);
         }
       } else if ("-ls".equals(cmd)) {
         if (argv.length > 1) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e040c97b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 3882b8b..9e82967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -18,16 +18,20 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
@@ -36,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -83,12 +88,123 @@ public class TestRouterAdminCLI {
   }
 
   @AfterClass
-  public static void tearDown() {
+  public static void tearDownCluster() {
     cluster.stopRouter(routerContext);
     cluster.shutdown();
     cluster = null;
   }
 
+  @After
+  public void tearDown() {
+    // set back system out
+    System.setOut(OLD_OUT);
+  }
+
+  @Test
+  public void testAddMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-addmounttable";
+    String dest = "/addmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    Assert.assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    MountTable mountTable = getResponse.getEntries().get(0);
+
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, destinations.get(0).getNameserviceId());
+    assertEquals(dest, destinations.get(0).getDest());
+    assertFalse(mountTable.isReadOnly());
+
+    // test mount table update behavior
+    dest = dest + "-new";
+    argv = new String[] {"-add", src, nsId, dest, "-readonly"};
+    Assert.assertEquals(0, ToolRunner.run(admin, argv));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(2, mountTable.getDestinations().size());
+    assertEquals(nsId, mountTable.getDestinations().get(1).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(1).getDest());
+    assertTrue(mountTable.isReadOnly());
+  }
+
+  @Test
+  public void testListMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-lsmounttable";
+    String dest = "/lsmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    // re-set system out for testing
+    System.setOut(new PrintStream(out));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    argv = new String[] {"-ls", src};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+
+    out.reset();
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance("/");
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+
+    // Test the ls command without an input path; it should list the
+    // mount table entries under the root path.
+    argv = new String[] {"-ls"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+    String outStr = out.toString();
+    // verify that all the mount table entries are listed
+    for(MountTable entry: getResponse.getEntries()) {
+      assertTrue(outStr.contains(entry.getSourcePath()));
+    }
+  }
+
+  @Test
+  public void testRemoveMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-rmmounttable";
+    String dest = "/rmmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+
+    argv = new String[] {"-rm", src};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    assertEquals(0, getResponse.getEntries().size());
+
+    // remove an invalid mount table
+    String invalidPath = "/invalid";
+    System.setOut(new PrintStream(out));
+    argv = new String[] {"-rm", invalidPath};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(
+        "Cannot remove mount point " + invalidPath));
+  }
+
   @Test
   public void testMountTableDefaultACL() throws Exception {
     String[] argv = new String[] {"-add", "/testpath0", "ns0", "/testdir0"};
@@ -140,8 +256,7 @@ public class TestRouterAdminCLI {
     assertEquals(0, ToolRunner.run(admin, argv));
     verifyExecutionResult("/testpath2-3", true, 0, 0);
 
-    // set back system out and login user
-    System.setOut(OLD_OUT);
+    // set back login user
     remoteUser = UserGroupInformation.createRemoteUser(superUser);
     UserGroupInformation.setLoginUser(remoteUser);
   }
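
The tests drive the admin tool exactly as a caller would. A sketch of the same pattern outside a test, assuming a running router whose admin RPC is reachable through the default configuration; a zero exit code indicates success, and the confirmation messages now arrive on System.out:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public final class MountTableDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        RouterAdmin admin = new RouterAdmin(conf);
        // Map /data in the global namespace to /data on nameservice ns0.
        int rc = ToolRunner.run(admin, new String[] {"-add", "/data", "ns0", "/data"});
        // List it back, then remove it.
        rc = ToolRunner.run(admin, new String[] {"-ls", "/data"});
        rc = ToolRunner.run(admin, new String[] {"-rm", "/data"});
        System.exit(rc);
      }
    }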




[23/38] hadoop git commit: HADOOP-15113. NPE in S3A getFileStatus: null instrumentation on using closed instance. Contributed by Steve Loughran.

Posted by ae...@apache.org.
HADOOP-15113. NPE in S3A getFileStatus: null instrumentation on using closed instance.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef450df4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef450df4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef450df4

Branch: refs/heads/HDFS-7240
Commit: ef450df443f1dea1c52082cf281f25db7141972f
Parents: d2d8f4a
Author: Steve Loughran <st...@apache.org>
Authored: Thu Dec 21 14:15:53 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Dec 21 14:15:53 2017 +0000

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 53 ++++++++---
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  3 +
 .../apache/hadoop/fs/s3a/ITestS3AClosedFS.java  | 92 ++++++++++++++++++++
 3 files changed, 135 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
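
The fix replaces a late NullPointerException with an early, explicit failure: every public entry point first checks that the instance is still open, then records its statistic. A minimal sketch of the guard with assumed names, and a plain IOException standing in for whatever exception type the patch actually throws:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    class GuardedService {
      private final AtomicBoolean closed = new AtomicBoolean(false);
      // Volatile mirror of "closed" for cheap reads on hot paths.
      private volatile boolean isClosed = false;

      void close() {
        if (closed.compareAndSet(false, true)) {
          isClosed = true;
        }
      }

      private void checkNotClosed() throws IOException {
        if (isClosed) {
          throw new IOException("FileSystem is closed");
        }
      }

      // Pattern used by entryPoint() in the patch: verify open, then count.
      void operation() throws IOException {
        checkNotClosed();
        // ...increment statistics and perform the work...
      }
    }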


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 9431f17..f461c9e 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -187,6 +187,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   private long readAhead;
   private S3AInputPolicy inputPolicy;
   private final AtomicBoolean closed = new AtomicBoolean(false);
+  private volatile boolean isClosed = false;
   private MetadataStore metadataStore;
   private boolean allowAuthoritative;
 
@@ -678,7 +679,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   public FSDataInputStream open(Path f, int bufferSize)
       throws IOException {
-
+    checkNotClosed();
     LOG.debug("Opening '{}' for reading; input policy = {}", f, inputPolicy);
     final FileStatus fileStatus = getFileStatus(f);
     if (fileStatus.isDirectory()) {
@@ -722,6 +723,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   public FSDataOutputStream create(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
+    checkNotClosed();
     final Path path = qualify(f);
     String key = pathToKey(path);
     FileStatus status = null;
@@ -871,7 +873,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     Path dst = qualify(dest);
 
     LOG.debug("Rename path {} to {}", src, dst);
-    incrementStatistic(INVOCATION_RENAME);
+    entryPoint(INVOCATION_RENAME);
 
     String srcKey = pathToKey(src);
     String dstKey = pathToKey(dst);
@@ -1098,6 +1100,17 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Entry point to an operation.
+   * Increments the statistic; verifies the FS is active.
+   * @param operation The operation to increment
+   * @throws IOException if the filesystem is closed.
+   */
+  protected void entryPoint(Statistic operation) throws IOException {
+    checkNotClosed();
+    incrementStatistic(operation);
+  }
+
+  /**
    * Increment a statistic by 1.
    * @param statistic The operation to increment
    */
@@ -1660,6 +1673,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Retries.RetryTranslated
   public boolean delete(Path f, boolean recursive) throws IOException {
     try {
+      checkNotClosed();
       return innerDelete(innerGetFileStatus(f, true), recursive);
     } catch (FileNotFoundException e) {
       LOG.debug("Couldn't delete {} - does not exist", f);
@@ -1838,7 +1852,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     Path path = qualify(f);
     String key = pathToKey(path);
     LOG.debug("List status for path: {}", path);
-    incrementStatistic(INVOCATION_LIST_STATUS);
+    entryPoint(INVOCATION_LIST_STATUS);
 
     List<FileStatus> result;
     final FileStatus fileStatus =  getFileStatus(path);
@@ -1981,7 +1995,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       throws IOException, FileAlreadyExistsException, AmazonClientException {
     Path f = qualify(p);
     LOG.debug("Making directory: {}", f);
-    incrementStatistic(INVOCATION_MKDIRS);
+    entryPoint(INVOCATION_MKDIRS);
     FileStatus fileStatus;
     List<Path> metadataStoreDirs = null;
     if (hasMetadataStore()) {
@@ -2058,7 +2072,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Retries.RetryTranslated
   S3AFileStatus innerGetFileStatus(final Path f,
       boolean needEmptyDirectoryFlag) throws IOException {
-    incrementStatistic(INVOCATION_GET_FILE_STATUS);
+    entryPoint(INVOCATION_GET_FILE_STATUS);
     final Path path = qualify(f);
     String key = pathToKey(path);
     LOG.debug("Getting path status for {}  ({})", path, key);
@@ -2319,7 +2333,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite,
       Path src, Path dst)
       throws IOException, FileAlreadyExistsException, AmazonClientException {
-    incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
+    entryPoint(INVOCATION_COPY_FROM_LOCAL_FILE);
     LOG.debug("Copying local file from {} to {}", src, dst);
 
     // Since we have a local file, we don't need to stream into a temporary file
@@ -2418,6 +2432,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       // already closed
       return;
     }
+    isClosed = true;
+    LOG.debug("Filesystem {} is closed", uri);
     try {
       super.close();
     } finally {
@@ -2435,6 +2451,17 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Verify that the filesystem is open. Non-blocking; this gives
+   * the last state of the volatile {@link #isClosed} field.
+   * @throws IOException if the filesystem is closed.
+   */
+  private void checkNotClosed() throws IOException {
+    if (isClosed) {
+      throw new IOException(uri + ": " + E_FS_CLOSED);
+    }
+  }
+
+  /**
    * Override getCanonicalServiceName because we don't support token in S3A.
    */
   @Override
@@ -2860,7 +2887,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   @Override
   public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    incrementStatistic(INVOCATION_GLOB_STATUS);
+    entryPoint(INVOCATION_GLOB_STATUS);
     return super.globStatus(pathPattern);
   }
 
@@ -2871,7 +2898,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
       throws IOException {
-    incrementStatistic(INVOCATION_GLOB_STATUS);
+    entryPoint(INVOCATION_GLOB_STATUS);
     return super.globStatus(pathPattern, filter);
   }
 
@@ -2881,7 +2908,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   @Override
   public boolean exists(Path f) throws IOException {
-    incrementStatistic(INVOCATION_EXISTS);
+    entryPoint(INVOCATION_EXISTS);
     return super.exists(f);
   }
 
@@ -2892,7 +2919,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   @SuppressWarnings("deprecation")
   public boolean isDirectory(Path f) throws IOException {
-    incrementStatistic(INVOCATION_IS_DIRECTORY);
+    entryPoint(INVOCATION_IS_DIRECTORY);
     return super.isDirectory(f);
   }
 
@@ -2903,7 +2930,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   @SuppressWarnings("deprecation")
   public boolean isFile(Path f) throws IOException {
-    incrementStatistic(INVOCATION_IS_FILE);
+    entryPoint(INVOCATION_IS_FILE);
     return super.isFile(f);
   }
 
@@ -2948,7 +2975,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
   private RemoteIterator<LocatedFileStatus> innerListFiles(Path f, boolean
       recursive, Listing.FileStatusAcceptor acceptor) throws IOException {
-    incrementStatistic(INVOCATION_LIST_FILES);
+    entryPoint(INVOCATION_LIST_FILES);
     Path path = qualify(f);
     LOG.debug("listFiles({}, {})", path, recursive);
     try {
@@ -3033,7 +3060,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
       final PathFilter filter)
       throws FileNotFoundException, IOException {
-    incrementStatistic(INVOCATION_LIST_LOCATED_STATUS);
+    entryPoint(INVOCATION_LIST_LOCATED_STATUS);
     Path path = qualify(f);
     LOG.debug("listLocatedStatus({}, {}", path, filter);
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 2457217..6d66739 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -89,6 +89,9 @@ public final class S3AUtils {
       "is abstract and therefore cannot be created";
   static final String ENDPOINT_KEY = "Endpoint";
 
+  /** Filesystem is closed; kept here to keep the errors close. */
+  static final String E_FS_CLOSED = "FileSystem is closed!";
+
   /**
    * Core property for provider path. Duplicated here for consistent
    * code across Hadoop version: {@value}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
new file mode 100644
index 0000000..6e81452
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import java.io.IOException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.E_FS_CLOSED;
+
+/**
+ * Tests of an S3A FileSystem that has been closed; just make sure
+ * that basic file ops fail meaningfully.
+ */
+public class ITestS3AClosedFS extends AbstractS3ATestBase {
+
+  private Path root = new Path("/");
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    root = getFileSystem().makeQualified(new Path("/"));
+    getFileSystem().close();
+  }
+
+  @Override
+  public void teardown()  {
+    // no op, as the FS is closed
+  }
+
+  @Test
+  public void testClosedGetFileStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().getFileStatus(root));
+  }
+
+  @Test
+  public void testClosedListStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listStatus(root));
+  }
+
+  @Test
+  public void testClosedListFile() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listFiles(root, false));
+  }
+
+  @Test
+  public void testClosedListLocatedStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listLocatedStatus(root));
+  }
+
+  @Test
+  public void testClosedCreate() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().create(path("to-create")).close());
+  }
+
+  @Test
+  public void testClosedDelete() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () ->  getFileSystem().delete(path("to-delete"), false));
+  }
+
+  @Test
+  public void testClosedOpen() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () ->  getFileSystem().open(path("to-open")));
+  }
+
+}
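
The tests above lean on LambdaTestUtils.intercept to assert that a lambda throws a given exception type whose message contains a given substring. A generic, self-contained equivalent of that assertion helper (a sketch under those assumptions, not the Hadoop implementation):

import java.util.concurrent.Callable;

public final class Intercept {

  private Intercept() {
  }

  // Runs the callable and asserts it throws an exception of the given
  // type whose message contains the expected substring; returns the
  // caught exception for further inspection.
  public static <E extends Exception> E intercept(
      Class<E> clazz, String contained, Callable<?> eval) throws Exception {
    try {
      Object result = eval.call();
      throw new AssertionError("Expected " + clazz.getName()
          + " but call returned: " + result);
    } catch (Exception e) {
      if (!clazz.isInstance(e)) {
        throw e; // wrong exception type: propagate for the test to fail
      }
      if (contained != null && (e.getMessage() == null
          || !e.getMessage().contains(contained))) {
        throw new AssertionError("Wrong message in " + e, e);
      }
      return clazz.cast(e);
    }
  }
}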




[09/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
Add 2.8.3 release jdiff files.

(cherry picked from commit c89f99aade575ab1f6a9836df719cda272293d90)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f8caf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f8caf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f8caf5

Branch: refs/heads/HDFS-7240
Commit: a7f8caf58ed47574861d455a9d9e1521e35c10b9
Parents: 25a36b7
Author: Junping Du <ju...@apache.org>
Authored: Mon Dec 18 22:30:32 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Mon Dec 18 22:39:01 2017 -0800

----------------------------------------------------------------------
 .../jdiff/Apache_Hadoop_Common_2.8.3.xml        | 38433 +++++++++++++++++
 .../jdiff/Apache_Hadoop_HDFS_2.8.3.xml          |   312 +
 .../Apache_Hadoop_MapReduce_Common_2.8.3.xml    |   113 +
 .../Apache_Hadoop_MapReduce_Core_2.8.3.xml      | 27495 ++++++++++++
 .../Apache_Hadoop_MapReduce_JobClient_2.8.3.xml |    16 +
 .../jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml   |  2316 +
 .../jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml   |  2665 ++
 .../Apache_Hadoop_YARN_Server_Common_2.8.3.xml  |   829 +
 8 files changed, 72179 insertions(+)
----------------------------------------------------------------------





[30/38] hadoop git commit: HDFS-9023. When NN is not able to identify DN for replication, reason behind it can be logged.

Posted by ae...@apache.org.
HDFS-9023. When NN is not able to identify DN for replication, reason behind it can be logged.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bf7e594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bf7e594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bf7e594

Branch: refs/heads/HDFS-7240
Commit: 5bf7e594d7d54e5295fe4240c3d60c08d4755ab7
Parents: d31c9d8
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Dec 28 11:52:49 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Dec 28 11:54:08 2017 -0800

----------------------------------------------------------------------
 .../BlockPlacementPolicyDefault.java            | 74 +++++++++++++++++---
 .../blockmanagement/DatanodeDescriptor.java     |  2 +-
 2 files changed, 64 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
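
The change accumulates, per placement attempt, a count of why candidate datanodes were rejected, and logs the summary map when placement falls short. A minimal standalone sketch of that thread-local counting pattern (enum values and class name are illustrative):

import java.util.EnumMap;
import java.util.Map;

public class RejectionCounter {

  enum Reason { NOT_IN_SERVICE, STALE, TOO_BUSY, RACK_FULL, NO_SPACE }

  // One map per thread, so concurrent placement attempts don't
  // interleave their counts.
  private static final ThreadLocal<Map<Reason, Integer>> REASONS =
      ThreadLocal.withInitial(() -> new EnumMap<>(Reason.class));

  static void reset() {
    REASONS.get().clear();
  }

  static void record(Reason reason) {
    REASONS.get().merge(reason, 1, Integer::sum);
  }

  public static void main(String[] args) {
    reset();
    record(Reason.TOO_BUSY);
    record(Reason.TOO_BUSY);
    record(Reason.STALE);
    // e.g. {STALE=1, TOO_BUSY=2}, logged when not enough replicas fit
    System.out.println(REASONS.get());
  }
}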


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bf7e594/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index b925feb..a37cda4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -62,6 +62,28 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         }
       };
 
+  private static final ThreadLocal<HashMap<NodeNotChosenReason, Integer>>
+      CHOOSE_RANDOM_REASONS = ThreadLocal
+      .withInitial(() -> new HashMap<NodeNotChosenReason, Integer>());
+
+  private enum NodeNotChosenReason {
+    NOT_IN_SERVICE("the node isn't in service"),
+    NODE_STALE("the node is stale"),
+    NODE_TOO_BUSY("the node is too busy"),
+    TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
+    NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
+
+    private final String text;
+
+    NodeNotChosenReason(final String logText) {
+      text = logText;
+    }
+
+    private String getText() {
+      return text;
+    }
+  }
+
   protected boolean considerLoad; 
   protected double considerLoadFactor;
   private boolean preferLocalNode;
@@ -711,6 +733,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       builder.setLength(0);
       builder.append("[");
     }
+    CHOOSE_RANDOM_REASONS.get().clear();
     boolean badTarget = false;
     DatanodeStorageInfo firstChosen = null;
     while (numOfReplicas > 0) {
@@ -781,14 +804,24 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
     if (numOfReplicas>0) {
       String detail = enableDebugLogging;
-      if (LOG.isDebugEnabled()) {
-        if (badTarget && builder != null) {
-          detail = builder.toString();
+      if (LOG.isDebugEnabled() && builder != null) {
+        detail = builder.toString();
+        if (badTarget) {
           builder.setLength(0);
         } else {
+          if (detail.length() > 1) {
+            // only log if there's more than "[", which is always appended at
+            // the beginning of this method.
+            LOG.debug(detail);
+          }
           detail = "";
         }
       }
+      final HashMap<NodeNotChosenReason, Integer> reasonMap =
+          CHOOSE_RANDOM_REASONS.get();
+      if (!reasonMap.isEmpty()) {
+        LOG.info("Not enough replicas was chosen. Reason:{}", reasonMap);
+      }
       throw new NotEnoughReplicasException(detail);
     }
     
@@ -834,19 +867,38 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (storage != null) {
       results.add(storage);
     } else {
-      logNodeIsNotChosen(dnd, "no good storage to place the block ");
+      logNodeIsNotChosen(dnd, NodeNotChosenReason.NOT_ENOUGH_STORAGE_SPACE,
+          " for storage type " + storageType);
     }
     return storage;
   }
 
   private static void logNodeIsNotChosen(DatanodeDescriptor node,
-      String reason) {
+      NodeNotChosenReason reason) {
+    logNodeIsNotChosen(node, reason, null);
+  }
+
+  private static void logNodeIsNotChosen(DatanodeDescriptor node,
+      NodeNotChosenReason reason, String reasonDetails) {
+    assert reason != null;
     if (LOG.isDebugEnabled()) {
       // build the error message for later use.
       debugLoggingBuilder.get()
           .append("\n  Datanode ").append(node)
-          .append(" is not chosen since ").append(reason).append(".");
+          .append(" is not chosen since ").append(reason.getText());
+      if (reasonDetails != null) {
+        debugLoggingBuilder.get().append(" ").append(reasonDetails);
+      }
+      debugLoggingBuilder.get().append(".");
+    }
+    // always populate reason map to log high level reasons.
+    final HashMap<NodeNotChosenReason, Integer> reasonMap =
+        CHOOSE_RANDOM_REASONS.get();
+    Integer base = reasonMap.get(reason);
+    if (base == null) {
+      base = 0;
     }
+    reasonMap.put(reason, base + 1);
   }
 
   /**
@@ -868,13 +920,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                          boolean avoidStaleNodes) {
     // check if the node is (being) decommissioned
     if (!node.isInService()) {
-      logNodeIsNotChosen(node, "the node isn't in service.");
+      logNodeIsNotChosen(node, NodeNotChosenReason.NOT_IN_SERVICE);
       return false;
     }
 
     if (avoidStaleNodes) {
       if (node.isStale(this.staleInterval)) {
-        logNodeIsNotChosen(node, "the node is stale ");
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_STALE);
         return false;
       }
     }
@@ -885,8 +937,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           stats.getInServiceXceiverAverage();
       final int nodeLoad = node.getXceiverCount();
       if (nodeLoad > maxLoad) {
-        logNodeIsNotChosen(node, "the node is too busy (load: " + nodeLoad
-            + " > " + maxLoad + ") ");
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
+            "(load: " + nodeLoad + " > " + maxLoad + ")");
         return false;
       }
     }
@@ -901,7 +953,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       }
     }
     if (counter > maxTargetPerRack) {
-      logNodeIsNotChosen(node, "the rack has too many chosen nodes ");
+      logNodeIsNotChosen(node, NodeNotChosenReason.TOO_MANY_NODES_ON_RACK);
       return false;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bf7e594/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index fc58708..618bc13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -764,7 +764,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       }
     }
     if (requiredSize > remaining - scheduledSize) {
-      LOG.debug(
+      BlockPlacementPolicy.LOG.debug(
           "The node {} does not have enough {} space (required={},"
           + " scheduled={}, remaining={}).",
           this, t, requiredSize, scheduledSize, remaining);




[37/38] hadoop git commit: HADOOP-15117. open(PathHandle) contract test should be exhaustive for default options

Posted by ae...@apache.org.
HADOOP-15117. open(PathHandle) contract test should be exhaustive for default options


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fe6f83c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fe6f83c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fe6f83c

Branch: refs/heads/HDFS-7240
Commit: 7fe6f83c8f0f67b1456c37d94b0de807e81a904a
Parents: 4bb765e
Author: Chris Douglas <cd...@apache.org>
Authored: Sat Dec 30 17:58:35 2017 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Sat Dec 30 17:58:35 2017 -0800

----------------------------------------------------------------------
 .../fs/contract/AbstractContractOpenTest.java   | 258 -------------------
 .../AbstractContractPathHandleTest.java         | 246 ++++++++++++++++++
 .../hadoop/hdfs/protocol/HdfsPathHandle.java    |   3 +
 .../hdfs/TestHDFSContractPathHandle.java        |  55 ++++
 4 files changed, 304 insertions(+), 258 deletions(-)
----------------------------------------------------------------------
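
The rewritten test multiplies each named set of HandleOpt defaults by {direct, serialized}, so every combination is exercised once with the raw handle and once after a byte round trip. A standalone illustration of that cross-product construction, using plain strings in place of HandleOpt (illustrative only):

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class ParamMatrix {

  // Cross each option name with {true, false} for "serialized",
  // yielding 8 parameter rows from 4 option sets.
  public static Collection<Object[]> params() {
    List<String> names =
        Arrays.asList("Exact", "Content", "Path", "Reference");
    return names.stream()
        .flatMap(name -> Arrays.asList(true, false).stream()
            .map(serialized -> new Object[] {name, serialized}))
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    params().forEach(p -> System.out.println(Arrays.toString(p)));
  }
}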


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe6f83c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index ab179eb..d475c6e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -19,27 +19,17 @@ package org.apache.hadoop.fs.contract;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.InvalidPathHandleException;
-import org.apache.hadoop.fs.Options.HandleOpt;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathHandle;
-import org.apache.hadoop.fs.RawPathHandle;
 import org.apache.hadoop.io.IOUtils;
 
-import static org.apache.hadoop.fs.contract.ContractTestUtils.appendFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents;
 
 import org.junit.Test;
 
@@ -173,252 +163,4 @@ public abstract class AbstractContractOpenTest
     instream.close();
   }
 
-  /**
-   * Skip a test case if the FS doesn't support file references.
-   * The feature is assumed to be unsupported unless stated otherwise.
-   */
-  protected void assumeSupportsFileReference() throws IOException {
-    if (getContract().isSupported(SUPPORTS_FILE_REFERENCE, false)) {
-      return;
-    }
-    skip("Skipping as unsupported feature: " + SUPPORTS_FILE_REFERENCE);
-  }
-
-  /**
-   * Skip a test case if the FS doesn't support content validation.
-   * The feature is assumed to be unsupported unless stated otherwise.
-   */
-  protected void assumeSupportsContentCheck() throws IOException {
-    if (getContract().isSupported(SUPPORTS_CONTENT_CHECK, false)) {
-      return;
-    }
-    skip("Skipping as unsupported feature: " + SUPPORTS_CONTENT_CHECK);
-  }
-
-  private PathHandle getHandleOrSkip(FileStatus stat, HandleOpt... opts) {
-    try {
-      return getFileSystem().getPathHandle(stat, opts);
-    } catch (UnsupportedOperationException e) {
-      skip("FileSystem does not support " + Arrays.toString(opts));
-    }
-    // unreachable
-    return null;
-  }
-
-  /**
-   * Verify {@link HandleOpt#exact()} handle semantics.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileByExact() throws Throwable {
-    describe("verify open(getPathHandle(FileStatus, exact())) operations" +
-        "detect changes");
-    assumeSupportsContentCheck();
-    assumeSupportsFileReference();
-    Path path1 = path("testopenfilebyexact1");
-    Path path2 = path("testopenfilebyexact2");
-    byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-    createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat1 = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat1);
-    assertEquals(path1, stat1.getPath());
-    ContractTestUtils.rename(getFileSystem(), path1, path2);
-    FileStatus stat2 = getFileSystem().getFileStatus(path2);
-    assertNotNull(stat2);
-    assertEquals(path2, stat2.getPath());
-    // create identical file at same location, orig still exists at path2
-    createFile(getFileSystem(), path1, false, file1);
-
-    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.exact());
-    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.exact());
-
-    // verify path1, path2 contents identical
-    verifyFileContents(getFileSystem(), path1, file1);
-    verifyFileContents(getFileSystem(), path2, file1);
-    try {
-      // the PathHandle will not resolve, even though
-      // the original entity exists, it has not been modified, and an
-      // identical file exists at the old path. The handle would also
-      // fail to resolve if path1 had been modified
-      instream = getFileSystem().open(fd1);
-      fail("Expected an exception");
-    } catch (InvalidPathHandleException e) {
-      // expected
-    }
-
-    // verify unchanged resolves
-    instream = getFileSystem().open(fd2);
-    verifyRead(instream, file1, 0, TEST_FILE_LEN);
-  }
-
-  /**
-   * Verify {@link HandleOpt#content()} handle semantics.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileByContent() throws Throwable {
-    describe("verify open(getPathHandle(FileStatus, content())) operations" +
-        "follow relocation");
-    assumeSupportsContentCheck();
-    assumeSupportsFileReference();
-    Path path1 = path("testopenfilebycontent1");
-    Path path2 = path("testopenfilebycontent2");
-    byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-    createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
-    // rename the file after obtaining FileStatus
-    ContractTestUtils.rename(getFileSystem(), path1, path2);
-
-    // obtain handle to entity from #getFileStatus call
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.content());
-
-    try (FSDataInputStream in = getFileSystem().open(fd)) {
-      // verify read of consistent content at new location
-      verifyRead(in, file1, 0, TEST_FILE_LEN);
-    }
-
-    // modify the file at its new location by appending data
-    byte[] file1a = dataset(TEST_FILE_LEN, 44, 255);
-    appendFile(getFileSystem(), path2, file1a);
-    byte[] file1x = Arrays.copyOf(file1, file1.length + file1a.length);
-    System.arraycopy(file1a, 0, file1x, file1.length, file1a.length);
-    // verify fd entity contains contents of file1 + appended bytes
-    verifyFileContents(getFileSystem(), path2, file1x);
-
-    try {
-      // handle should not resolve when content changed
-      instream = getFileSystem().open(fd);
-      fail("Failed to detect change to content");
-    } catch (InvalidPathHandleException e) {
-      // expected
-    }
-  }
-
-
-  /**
-   * Verify {@link HandleOpt#path()} handle semantics.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileByPath() throws Throwable {
-    describe("verify open(getPathHandle(FileStatus, path())) operations" +
-        "detect changes");
-    assumeSupportsContentCheck();
-    Path path1 = path("testopenfilebypath1");
-    Path path2 = path("testopenfilebypath2");
-
-    byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-    createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat1 = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat1);
-    assertEquals(path1, stat1.getPath());
-    ContractTestUtils.rename(getFileSystem(), path1, path2);
-    FileStatus stat2 = getFileSystem().getFileStatus(path2);
-    assertNotNull(stat2);
-    assertEquals(path2, stat2.getPath());
-    // create identical file at same location, orig still exists at path2
-    createFile(getFileSystem(), path1, false, file1);
-
-    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.path());
-    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.path());
-
-    // verify path1, path2 contents identical
-    verifyFileContents(getFileSystem(), path1, file1);
-    verifyFileContents(getFileSystem(), path2, file1);
-    try {
-      // verify attempt to resolve the handle fails
-      instream = getFileSystem().open(fd1);
-      fail("Expected an exception");
-    } catch (InvalidPathHandleException e) {
-      // expected
-    }
-
-    // verify content change OK
-    byte[] file2a = dataset(TEST_FILE_LEN, 44, 255);
-    ContractTestUtils.appendFile(getFileSystem(), path2, file2a);
-    byte[] file2x = Arrays.copyOf(file1, file1.length + file2a.length);
-    System.arraycopy(file2a, 0, file2x, file1.length, file2a.length);
-    // verify path2 contains contents of orig + appended bytes
-    verifyFileContents(getFileSystem(), path2, file2x);
-    // verify open by fd succeeds
-    instream = getFileSystem().open(fd2);
-    verifyRead(instream, file2x, 0, 2 * TEST_FILE_LEN);
-  }
-
-  /**
-   * Verify {@link HandleOpt#reference()} handle semantics.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileByReference() throws Throwable {
-    describe("verify open(getPathHandle(FileStatus, reference())) operations" +
-        " are independent of rename");
-    assumeSupportsFileReference();
-    Path path1 = path("testopenfilebyref1");
-    Path path2 = path("testopenfilebyref2");
-
-    byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-    createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
-    ContractTestUtils.rename(getFileSystem(), path1, path2);
-
-    byte[] file2 = dataset(TEST_FILE_LEN, 44, 255);
-    createFile(getFileSystem(), path1, false, file2);
-    byte[] file1a = dataset(TEST_FILE_LEN, 42, 255);
-    appendFile(getFileSystem(), path2, file1a);
-    byte[] file1x = Arrays.copyOf(file1, file1.length + file1a.length);
-    System.arraycopy(file1a, 0, file1x, file1.length, file1a.length);
-
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.reference());
-
-    // verify path2 contains contents of file1 + appended bytes
-    verifyFileContents(getFileSystem(), path2, file1x);
-    // verify path1 contents contents of file2
-    verifyFileContents(getFileSystem(), path1, file2);
-
-    // verify fd contains contents of file1 + appended bytes
-    instream = getFileSystem().open(fd);
-    verifyRead(instream, file1x, 0, 2 * TEST_FILE_LEN);
-  }
-
-  /**
-   * Verify {@link PathHandle} may be serialized and restored.
-   * @throws Throwable on error
-   */
-  @Test
-  public void testOpenFileBySerializedReference() throws Throwable {
-    describe("verify PathHandle supports generic serialization");
-    assumeSupportsFileReference();
-    Path path1 = path("testopenfilebyref1");
-    Path path2 = path("testopenfilebyref2");
-
-    byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
-    createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
-    ContractTestUtils.rename(getFileSystem(), path1, path2);
-
-    byte[] file2 = dataset(TEST_FILE_LEN, 44, 255);
-    createFile(getFileSystem(), path1, false, file2);
-
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.reference());
-
-    // serialize PathHandle
-    ByteBuffer sb = fd.bytes();
-    PathHandle fdb = new RawPathHandle(sb);
-
-    instream = getFileSystem().open(fdb);
-    // verify stat contains contents of file1
-    verifyRead(instream, file1, 0, TEST_FILE_LEN);
-    // verify path2 contains contents of file1
-    verifyFileContents(getFileSystem(), path2, file1);
-    // verify path1 contents contents of file2
-    verifyFileContents(getFileSystem(), path1, file2);
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe6f83c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
new file mode 100644
index 0000000..fbe28c3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.fs.contract;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.InvalidPathHandleException;
+import org.apache.hadoop.fs.Options.HandleOpt;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathHandle;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.appendFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyRead;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyFileContents;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+import org.apache.hadoop.fs.RawPathHandle;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test {@link PathHandle} operations and semantics.
+ * @see ContractOptions#SUPPORTS_FILE_REFERENCE
+ * @see ContractOptions#SUPPORTS_CONTENT_CHECK
+ * @see org.apache.hadoop.fs.FileSystem#getPathHandle(FileStatus, HandleOpt...)
+ * @see org.apache.hadoop.fs.FileSystem#open(PathHandle)
+ * @see org.apache.hadoop.fs.FileSystem#open(PathHandle, int)
+ */
+@RunWith(Parameterized.class)
+public abstract class AbstractContractPathHandleTest
+    extends AbstractFSContractTestBase {
+
+  private final HandleOpt[] opts;
+  private final boolean serialized;
+
+  private static final byte[] B1 = dataset(TEST_FILE_LEN, 43, 255);
+  private static final byte[] B2 = dataset(TEST_FILE_LEN, 44, 255);
+
+  /**
+   * Create an instance of the test from {@link #params()}.
+   * @param testname Name of the set of options under test
+   * @param opts Set of {@link HandleOpt} params under test.
+   * @param serialized Serialize the handle before using it.
+   */
+  public AbstractContractPathHandleTest(String testname, HandleOpt[] opts,
+      boolean serialized) {
+    this.opts = opts;
+    this.serialized = serialized;
+  }
+
+  /**
+   * Run tests against all combinations of default options. Also run each
+   * after converting the PathHandle to bytes and back.
+   * @return all combinations of default handle options, with and without serialization
+   */
+  @Parameterized.Parameters(name="Test{0}")
+  public static Collection<Object[]> params() {
+    return Arrays.asList(
+        Arrays.asList("Exact", HandleOpt.exact()),
+        Arrays.asList("Content", HandleOpt.content()),
+        Arrays.asList("Path", HandleOpt.path()),
+        Arrays.asList("Reference", HandleOpt.reference())
+    ).stream()
+    .flatMap((x) -> Arrays.asList(true, false).stream()
+        .map((b) -> {
+          ArrayList<Object> y = new ArrayList<>(x);
+          y.add(b);
+          return y;
+        }))
+    .map(ArrayList::toArray)
+    .collect(Collectors.toList());
+  }
+
+  @Override
+  protected Configuration createConfiguration() {
+    Configuration conf = super.createConfiguration();
+    conf.setInt(IO_FILE_BUFFER_SIZE_KEY, 4096);
+    return conf;
+  }
+
+  @Test
+  public void testIdent() throws IOException {
+    describe("verify simple open, no changes");
+    FileStatus stat = testFile(B1);
+    PathHandle fd = getHandleOrSkip(stat);
+    verifyFileContents(getFileSystem(), stat.getPath(), B1);
+
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
+      verifyRead(in, B1, 0, TEST_FILE_LEN);
+    }
+  }
+
+  @Test
+  public void testChanged() throws IOException {
+    describe("verify open(PathHandle, changed(*))");
+    assumeSupportsContentCheck();
+    HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
+        .orElseThrow(IllegalArgumentException::new);
+    FileStatus stat = testFile(B1);
+    // modify the file by appending data
+    appendFile(getFileSystem(), stat.getPath(), B2);
+    byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);
+    System.arraycopy(B2, 0, b12, B1.length, B2.length);
+    // verify fd entity contains contents of file1 + appended bytes
+    verifyFileContents(getFileSystem(), stat.getPath(), b12);
+    // get the handle *after* the file has been modified
+    PathHandle fd = getHandleOrSkip(stat);
+
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
+      assertTrue("Failed to detect content change", data.allowChange());
+      verifyRead(in, b12, 0, b12.length);
+    } catch (InvalidPathHandleException e) {
+      assertFalse("Failed to allow content change", data.allowChange());
+    }
+  }
+
+  @Test
+  public void testMoved() throws IOException {
+    describe("verify open(PathHandle, moved(*))");
+    assumeSupportsFileReference();
+    HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts)
+        .orElseThrow(IllegalArgumentException::new);
+    FileStatus stat = testFile(B1);
+    // rename the file after obtaining FileStatus
+    ContractTestUtils.rename(getFileSystem(), stat.getPath(),
+        path(stat.getPath() + "2"));
+    // obtain handle to entity from #getFileStatus call
+    PathHandle fd = getHandleOrSkip(stat);
+
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
+      assertTrue("Failed to detect location change", loc.allowChange());
+      verifyRead(in, B1, 0, B1.length);
+    } catch (InvalidPathHandleException e) {
+      assertFalse("Failed to allow location change", loc.allowChange());
+    }
+  }
+
+  @Test
+  public void testChangedAndMoved() throws IOException {
+    describe("verify open(PathHandle, changed(*), moved(*))");
+    assumeSupportsFileReference();
+    assumeSupportsContentCheck();
+    HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
+        .orElseThrow(IllegalArgumentException::new);
+    HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts)
+        .orElseThrow(IllegalArgumentException::new);
+    FileStatus stat = testFile(B1);
+    Path dst = path(stat.getPath() + "2");
+    ContractTestUtils.rename(getFileSystem(), stat.getPath(), dst);
+    appendFile(getFileSystem(), dst, B2);
+    PathHandle fd = getHandleOrSkip(stat);
+
+    byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);
+    System.arraycopy(B2, 0, b12, B1.length, B2.length);
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
+      assertTrue("Failed to detect location change", loc.allowChange());
+      assertTrue("Failed to detect content change", data.allowChange());
+      verifyRead(in, b12, 0, b12.length);
+    } catch (InvalidPathHandleException e) {
+      if (data.allowChange()) {
+        assertFalse("Failed to allow location change", loc.allowChange());
+      }
+      if (loc.allowChange()) {
+        assertFalse("Failed to allow content change", data.allowChange());
+      }
+    }
+  }
+
+  private FileStatus testFile(byte[] content) throws IOException {
+    Path path = path(methodName.getMethodName());
+    createFile(getFileSystem(), path, false, content);
+    FileStatus stat = getFileSystem().getFileStatus(path);
+    assertNotNull(stat);
+    assertEquals(path, stat.getPath());
+    return stat;
+  }
+
+  /**
+   * Skip a test case if the FS doesn't support file references.
+   * The feature is assumed to be unsupported unless stated otherwise.
+   */
+  protected void assumeSupportsFileReference() throws IOException {
+    if (getContract().isSupported(SUPPORTS_FILE_REFERENCE, false)) {
+      return;
+    }
+    skip("Skipping as unsupported feature: " + SUPPORTS_FILE_REFERENCE);
+  }
+
+  /**
+   * Skip a test case if the FS doesn't support content validation.
+   * The feature is assumed to be unsupported unless stated otherwise.
+   */
+  protected void assumeSupportsContentCheck() throws IOException {
+    if (getContract().isSupported(SUPPORTS_CONTENT_CHECK, false)) {
+      return;
+    }
+    skip("Skipping as unsupported feature: " + SUPPORTS_CONTENT_CHECK);
+  }
+
+  /**
+   * Utility method to obtain a handle or skip the test if the set of opts
+   * are not supported.
+   * @param stat Target file status
+   * @return Handle to the indicated entity or skip the test
+   */
+  protected PathHandle getHandleOrSkip(FileStatus stat) {
+    try {
+      PathHandle fd = getFileSystem().getPathHandle(stat, opts);
+      if (serialized) {
+        ByteBuffer sb = fd.bytes();
+        return new RawPathHandle(sb);
+      }
+      return fd;
+    } catch (UnsupportedOperationException e) {
+      skip("FileSystem does not support " + Arrays.toString(opts));
+    }
+    // unreachable
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe6f83c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java
index 14661ae..a04aeeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java
@@ -66,6 +66,9 @@ public final class HdfsPathHandle implements PathHandle {
 
   public void verify(HdfsLocatedFileStatus stat)
       throws InvalidPathHandleException {
+    if (null == stat) {
+      throw new InvalidPathHandleException("Could not resolve handle");
+    }
     if (mtime != null && mtime != stat.getModificationTime()) {
       throw new InvalidPathHandleException("Content changed");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fe6f83c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
new file mode 100644
index 0000000..c65a60b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+
+/**
+ * Verify HDFS compliance with {@link org.apache.hadoop.fs.PathHandle}
+ * semantics.
+ */
+public class TestHDFSContractPathHandle
+    extends AbstractContractPathHandleTest {
+
+  public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
+      boolean serialized) {
+    super(testname, opts, serialized);
+  }
+
+  @BeforeClass
+  public static void createCluster() throws IOException {
+    HDFSContract.createCluster();
+  }
+
+  @AfterClass
+  public static void teardownCluster() throws IOException {
+    HDFSContract.destroyCluster();
+  }
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+    return new HDFSContract(conf);
+  }
+}




[35/38] hadoop git commit: YARN-7555. Support multiple resource types in YARN native services. (wangda)

Posted by ae...@apache.org.
YARN-7555. Support multiple resource types in YARN native services. (wangda)

Change-Id: I330e6ee17a73962dcaadd766a3e72d2888681731


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7467e8fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7467e8fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7467e8fe

Branch: refs/heads/HDFS-7240
Commit: 7467e8fe5a95230986fed9d748769304af3f2b61
Parents: 8112761
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Dec 29 11:46:30 2017 -0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Dec 29 15:34:08 2017 -0800

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |   1 -
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  18 ++-
 .../dev-support/findbugs-exclude.xml            |   8 ++
 .../hadoop/yarn/service/ServiceScheduler.java   |   7 ++
 .../yarn/service/api/records/Resource.java      |  48 ++++++--
 .../api/records/ResourceInformation.java        | 119 +++++++++++++++++++
 .../yarn/service/component/Component.java       |  35 +++++-
 .../hadoop/yarn/service/MockServiceAM.java      |  31 ++++-
 .../hadoop/yarn/service/TestServiceAM.java      |  63 +++++++++-
 .../yarn/service/conf/TestAppJsonResolve.java   |  17 +++
 .../hadoop/yarn/service/conf/examples/app.json  |  11 +-
 .../markdown/yarn-service/YarnServiceAPI.md     |  15 ++-
 12 files changed, 343 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
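
The API change adds an optional "additional" map on the service Resource, keyed by resource type name, where each entry carries an integer value and an optional unit, on top of the existing cpus and memory fields. A minimal standalone model of that shape (illustrative classes, not the actual YARN records):

import java.util.HashMap;
import java.util.Map;

public class ResourceSpec {

  // (value, unit) pair for one resource type; an empty unit means none.
  static final class Info {
    final long value;
    final String unit;

    Info(long value, String unit) {
      this.value = value;
      this.unit = unit;
    }
  }

  int cpus = 1;
  String memory = "1024";
  // Resource types beyond memory and vcores, keyed by name.
  final Map<String, Info> additional = new HashMap<>();

  public static void main(String[] args) {
    ResourceSpec r = new ResourceSpec();
    r.cpus = 2;
    r.memory = "2048";
    r.additional.put("yarn.io/gpu", new Info(1, ""));
    System.out.println(r.additional.size() + " additional resource(s)");
  }
}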


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index de4b0e6..6a10312 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -650,5 +650,4 @@
     <Method name="equals" />
     <Bug pattern="EQ_OVERRIDING_EQUALS_NOT_SYMMETRIC" />
   </Match>
-
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 979883c..f142f66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -247,7 +247,18 @@ definitions:
       kerberos_principal:
         description: The Kerberos Principal of the service
         $ref: '#/definitions/KerberosPrincipal'
-
+  ResourceInformation:
+    description:
+      ResourceInformation determines unit/value of resource types in addition to memory and vcores. It will be part of the Resource object
+    properties:
+      value:
+        type: integer
+        format: int64
+        description: Integer value of the resource.
+      unit:
+        type: string
+        description:
+          Unit of the resource, acceptable values are: p/n/u/m/k/M/G/T/P/Ki/Mi/Gi/Ti/Pi. By default it is empty, which means no unit
   Resource:
     description:
       Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.
@@ -262,6 +273,11 @@ definitions:
       memory:
         type: string
         description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.
+      additional:
+        type: object
+        additionalProperties:
+          $ref: '#/definitions/ResourceInformation'
+        description: Map of resource name to ResourceInformation
   PlacementPolicy:
     description: Placement policy of an instance of a service. This feature is in the works in YARN-6592.
     properties:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
index 80c04c8..15ce952 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
@@ -48,4 +48,12 @@
         <Class name="org.apache.hadoop.yarn.service.ClientAMPolicyProvider"/>
         <Bug pattern="EI_EXPOSE_REP"/>
     </Match>
+    <!-- SE_BAD_FIELD -->
+    <Match>
+      <Class name="org.apache.hadoop.yarn.service.api.records.Resource" />
+      <Or>
+        <Field name="additional"/>
+      </Or>
+      <Bug pattern="SE_BAD_FIELD" />
+  </Match>
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 45cdd28..0a4ea07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.util.BoundedAppender;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -278,6 +279,12 @@ public class ServiceScheduler extends CompositeService {
     RegisterApplicationMasterResponse response = amRMClient
         .registerApplicationMaster(bindAddress.getHostName(),
             bindAddress.getPort(), "N/A");
+
+    // Update internal resource types according to response.
+    if (response.getResourceTypes() != null) {
+      ResourceUtils.reinitializeResources(response.getResourceTypes());
+    }
+
     if (response.getClientToAMTokenMasterKey() != null
         && response.getClientToAMTokenMasterKey().remaining() != 0) {
       context.secretManager

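The ordering in the hunk above is the heart of this change: the AM must refresh its process-local resource-type registry from the registration response before any Resource naming a custom type is constructed. A minimal sketch of the call that would otherwise fail (API names are from the YARN resource records used elsewhere in this commit; "resource-1" is illustrative):

    // Only memory and vcores are known to a fresh JVM, so this fails
    // with an unknown-resource error unless
    // ResourceUtils.reinitializeResources(response.getResourceTypes())
    // has run first.
    Resource capability = Resource.newInstance(1024, 1);
    capability.setResourceInformation("resource-1",
        ResourceInformation.newInstance("resource-1", "Gi", 3333L));
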
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
index 8f682b2..c417ec0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
@@ -17,16 +17,17 @@
 
 package org.apache.hadoop.yarn.service.api.records;
 
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Objects;
-
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import javax.xml.bind.annotation.XmlElement;
+import java.util.Map;
+import java.util.Objects;
+
 /**
  * Resource determines the amount of resources (vcores, memory, network, etc.)
  * usable by a container. This field determines the resource to be applied for
@@ -46,6 +47,10 @@ public class Resource extends BaseResource implements Cloneable {
   private Integer cpus = 1;
   private String memory = null;
 
+  @JsonProperty("additional")
+  @XmlElement(name = "additional")
+  private Map<String, ResourceInformation> additional = null;
+
   /**
    * Each resource profile has a unique id which is associated with a
    * cluster-level predefined memory, cpus, etc.
@@ -112,6 +117,28 @@ public class Resource extends BaseResource implements Cloneable {
     return Long.parseLong(memory);
   }
 
+  public Resource setResourceInformations(
+      Map<String, ResourceInformation> resourceInformations) {
+    this.additional = resourceInformations;
+    return this;
+  }
+
+  public Resource resourceInformations(
+      Map<String, ResourceInformation> resourceInformations) {
+    this.additional = resourceInformations;
+    return this;
+  }
+
+  /**
+   * Map of resource name to ResourceInformation
+   * @return additional
+   **/
+  @ApiModelProperty(value = "Map of resource name to ResourceInformation")
+  @JsonProperty("additional")
+  public Map<String, ResourceInformation> getAdditional() {
+    return additional;
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -121,14 +148,15 @@ public class Resource extends BaseResource implements Cloneable {
       return false;
     }
     Resource resource = (Resource) o;
-    return Objects.equals(this.profile, resource.profile)
-        && Objects.equals(this.cpus, resource.cpus)
-        && Objects.equals(this.memory, resource.memory);
+    return Objects.equals(this.profile, resource.profile) && Objects.equals(
+        this.cpus, resource.cpus) && Objects.equals(this.memory,
+        resource.memory) && Objects.equals(this.additional,
+        resource.additional);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(profile, cpus, memory);
+    return Objects.hash(profile, cpus, memory, additional);
   }
 
   @Override
@@ -139,6 +167,8 @@ public class Resource extends BaseResource implements Cloneable {
     sb.append("    profile: ").append(toIndentedString(profile)).append("\n");
     sb.append("    cpus: ").append(toIndentedString(cpus)).append("\n");
     sb.append("    memory: ").append(toIndentedString(memory)).append("\n");
+    sb.append("    additional: ").append(
+        toIndentedString(additional)).append("\n");
     sb.append("}");
     return sb.toString();
   }
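
Folding "additional" into equals/hashCode above matters anywhere specs are compared, for example when deciding whether a component definition changed. A small sketch of the new behavior, assuming the class keeps an implicit no-arg constructor:

    // Two resources identical except for a custom resource value no
    // longer compare equal.
    Resource a = new Resource().resourceInformations(ImmutableMap.of(
        "yarn.io/gpu", new ResourceInformation().value(1L)));
    Resource b = new Resource().resourceInformations(ImmutableMap.of(
        "yarn.io/gpu", new ResourceInformation().value(2L)));
    assert !a.equals(b);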

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
new file mode 100644
index 0000000..f39b11a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.api.records;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.gson.annotations.SerializedName;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+/**
+ * ResourceInformation determines the unit and value of resource types in addition to memory and vcores. It will be part of the Resource object.
+ */
+@ApiModel(description = "ResourceInformation determines the unit and value of resource types in addition to memory and vcores. It will be part of the Resource object")
+@javax.annotation.Generated(value = "io.swagger.codegen.languages.JavaClientCodegen",
+                            date = "2017-11-22T15:15:49.495-08:00")
+public class ResourceInformation {
+  @SerializedName("value")
+  private Long value = null;
+
+  @SerializedName("unit")
+  private String unit = null;
+
+  public ResourceInformation value(Long value) {
+    this.value = value;
+    return this;
+  }
+
+  /**
+   * Integer value of the resource.
+   *
+   * @return value
+   **/
+  @ApiModelProperty(value = "Integer value of the resource.")
+  @JsonProperty("value")
+  public Long getValue() {
+    return value;
+  }
+
+  public void setValue(Long value) {
+    this.value = value;
+  }
+
+  public ResourceInformation unit(String unit) {
+    this.unit = unit;
+    return this;
+  }
+
+  /**
+   * @return unit
+   **/
+  @ApiModelProperty(value = "")
+  @JsonProperty("unit")
+  public String getUnit() {
+    return unit == null ? "" : unit;
+  }
+
+  public void setUnit(String unit) {
+    this.unit = unit;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ResourceInformation resourceInformation = (ResourceInformation) o;
+    return Objects
+        .equals(this.value, resourceInformation.value) && Objects.equals(
+        this.unit, resourceInformation.unit);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(value, unit);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class ResourceInformation {\n");
+    sb.append("    value: ").append(toIndentedString(value)).append("\n");
+    sb.append("    unit: ").append(toIndentedString(unit)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a84c1b1..3090692 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
 import org.apache.hadoop.yarn.service.ContainerFailureTracker;
@@ -392,9 +393,37 @@ public class Component implements EventHandler<ComponentEvent> {
 
   @SuppressWarnings({ "unchecked" })
   public void requestContainers(long count) {
-    Resource resource = Resource
-        .newInstance(componentSpec.getResource().calcMemoryMB(),
-            componentSpec.getResource().getCpus());
+    org.apache.hadoop.yarn.service.api.records.Resource componentResource =
+        componentSpec.getResource();
+
+    Resource resource = Resource.newInstance(componentResource.calcMemoryMB(),
+        componentResource.getCpus());
+
+    if (componentResource.getAdditional() != null) {
+      for (Map.Entry<String, ResourceInformation> entry : componentResource
+          .getAdditional().entrySet()) {
+
+        String resourceName = entry.getKey();
+
+        // Avoid setting memory/cpu under "additional"
+        if (resourceName.equals(
+            org.apache.hadoop.yarn.api.records.ResourceInformation.MEMORY_URI)
+            || resourceName.equals(
+            org.apache.hadoop.yarn.api.records.ResourceInformation.VCORES_URI)) {
+          LOG.warn("Please set memory/vcore in the main section of resource, "
+              + "ignoring this entry=" + resourceName);
+          continue;
+        }
+
+        ResourceInformation specInfo = entry.getValue();
+        org.apache.hadoop.yarn.api.records.ResourceInformation ri =
+            org.apache.hadoop.yarn.api.records.ResourceInformation.newInstance(
+                entry.getKey(),
+                specInfo.getUnit(),
+                specInfo.getValue());
+        resource.setResourceInformation(resourceName, ri);
+      }
+    }
 
     for (int i = 0; i < count; i++) {
       //TODO Once YARN-5468 is done, use that for anti-affinity

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 37b18fa..3e1582d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -29,8 +29,16 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-
-import org.apache.hadoop.yarn.api.records.*;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.NMClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
@@ -47,11 +55,16 @@ import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeoutException;
 
@@ -88,7 +101,6 @@ public class MockServiceAM extends ServiceMaster {
     this.service = service;
   }
 
-
   @Override
   protected ContainerId getAMContainerId()
       throws BadClusterStateException {
@@ -185,7 +197,11 @@ public class MockServiceAM extends ServiceMaster {
           @Override
           public RegisterApplicationMasterResponse registerApplicationMaster(
               String appHostName, int appHostPort, String appTrackingUrl) {
-            return mock(RegisterApplicationMasterResponse.class);
+            RegisterApplicationMasterResponse response = mock(
+                RegisterApplicationMasterResponse.class);
+            when(response.getResourceTypes()).thenReturn(
+                ResourceUtils.getResourcesTypeInfo());
+            return response;
           }
 
           @Override public void unregisterApplicationMaster(
@@ -195,8 +211,11 @@ public class MockServiceAM extends ServiceMaster {
           }
         };
 
-        return AMRMClientAsync.createAMRMClientAsync(client1, 1000,
+        AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync =
+            AMRMClientAsync.createAMRMClientAsync(client1, 1000,
                 this.new AMRMClientCallback());
+
+        return amrmClientAsync;
       }
 
       @SuppressWarnings("SuspiciousMethodCalls")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 2a3303e..4dc1ebd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -18,17 +18,25 @@
 
 package org.apache.hadoop.yarn.service;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingCluster;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.component.ComponentState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -38,10 +46,12 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
-import static org.apache.hadoop.registry.client.api.RegistryConstants
-    .KEY_REGISTRY_ZK_QUORUM;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
 
 public class TestServiceAM extends ServiceTestUtils{
 
@@ -183,12 +193,12 @@ public class TestServiceAM extends ServiceTestUtils{
     am.init(conf);
     am.start();
     Thread.sleep(100);
-    GenericTestUtils.waitFor(() -> am.getComponent(comp1Name).getState().equals(
-        ComponentState.FLEXING), 100, 2000);
+    GenericTestUtils.waitFor(() -> am.getComponent(comp1Name).getState()
+        .equals(ComponentState.FLEXING), 100, 2000);
 
     // 1 pending instance
-    Assert.assertEquals(1,
-        am.getComponent(comp1Name).getPendingInstances().size());
+    Assert.assertEquals(1, am.getComponent(comp1Name).getPendingInstances()
+        .size());
 
     am.feedContainerToComp(exampleApp, 2, comp1Name);
 
@@ -198,6 +208,47 @@ public class TestServiceAM extends ServiceTestUtils{
         org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
         am.getCompInstance(comp1Name, comp1InstName).getContainerStatus()
             .getState());
+  }
+
+  @Test
+  public void testScheduleWithMultipleResourceTypes()
+      throws TimeoutException, InterruptedException, IOException {
+    ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setName("testScheduleWithMultipleResourceTypes");
+
+    List<ResourceTypeInfo> resourceTypeInfos = new ArrayList<>(
+        ResourceUtils.getResourcesTypeInfo());
+    // Add 3rd resource type.
+    resourceTypeInfos.add(ResourceTypeInfo
+        .newInstance("resource-1", "", ResourceTypes.COUNTABLE));
+    // Reinitialize resource types
+    ResourceUtils.reinitializeResources(resourceTypeInfos);
+
+    Component serviceComponent = createComponent("compa", 1, "pwd");
+    serviceComponent.getResource().setResourceInformations(ImmutableMap
+        .of("resource-1", new ResourceInformation().value(3333L).unit("Gi")));
+    exampleApp.addComponent(serviceComponent);
+
+    MockServiceAM am = new MockServiceAM(exampleApp);
+    am.init(conf);
+    am.start();
+
+    ServiceScheduler serviceScheduler = am.context.scheduler;
+    AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync =
+        serviceScheduler.getAmRMClient();
+
+    Collection<AMRMClient.ContainerRequest> rr =
+        amrmClientAsync.getMatchingRequests(0);
+    Assert.assertEquals(1, rr.size());
+
+    org.apache.hadoop.yarn.api.records.Resource capability =
+        rr.iterator().next().getCapability();
+    Assert.assertEquals(3333L, capability.getResourceValue("resource-1"));
+    Assert.assertEquals("Gi",
+        capability.getResourceInformation("resource-1").getUnits());
+
     am.stop();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
index 18318aa..73f7fa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.service.conf;
 
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.service.ServiceTestUtils;
+import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.api.records.Configuration;
@@ -194,6 +195,22 @@ public class TestAppJsonResolve extends Assert {
     assertEquals("60",
         worker.getProperty("yarn.service.failure-count-reset.window"));
 
+    // Validate worker's resources
+    Resource workerResource = orig.getComponent("worker").getResource();
+    Assert.assertEquals(1, workerResource.getCpus().intValue());
+    Assert.assertEquals(1024, workerResource.calcMemoryMB());
+    Assert.assertNotNull(workerResource.getAdditional());
+    Assert.assertEquals(2, workerResource.getAdditional().size());
+    Assert.assertEquals(3333, workerResource.getAdditional().get(
+        "resource-1").getValue().longValue());
+    Assert.assertEquals("Gi", workerResource.getAdditional().get(
+        "resource-1").getUnit());
+
+    Assert.assertEquals(5, workerResource.getAdditional().get(
+        "yarn.io/gpu").getValue().longValue());
+    Assert.assertEquals("", workerResource.getAdditional().get(
+        "yarn.io/gpu").getUnit());
+
     other = orig.getComponent("other").getConfiguration();
     assertEquals(0, other.getProperties().size());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
index 2eb477f..7e56f1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
@@ -37,7 +37,16 @@
       "launch_command": "sleep 3600",
       "resource": {
         "cpus": 1,
-        "memory": "1024"
+        "memory": "1024",
+        "additional": {
+          "resource-1": {
+            "value": 3333,
+            "unit": "Gi"
+          },
+          "yarn.io/gpu": {
+            "value": 5
+          }
+        }
       },
       "configuration": {
         "properties": {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index e224e5d..65d463d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -1,4 +1,4 @@
-<!---
+# <!---
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
@@ -330,6 +330,7 @@ Resource determines the amount of resources (vcores, memory, network, etc.) usab
 |profile|Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.|false|string||
 |cpus|Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).|false|integer (int32)||
 |memory|Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.|false|string||
+|additional|A map of resource type name to resource type information, including value (integer) and unit (string). This is used to specify resources other than cpu and memory. Please refer to the example below.|false|object||
 
 
 ### Service
@@ -395,8 +396,14 @@ POST URL - http://localhost:8088/ws/v1/services
         "launch_command": "./start_nginx.sh",
         "resource": {
           "cpus": 1,
-          "memory": "256"
-       }
+          "memory": "256",
+          "additional" : {
+            "yarn.io/gpu" : {
+              "value" : 4,
+              "unit" : ""
+            }
+          }
+        }
       }
     ]
 }
@@ -605,3 +612,5 @@ POST URL - http://localhost:8088:/ws/v1/services/hbase-app-1
   }
 }
 ```
+
+


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
new file mode 100644
index 0000000..a63b3ac
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:57:09 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce JobClient 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/classes:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-mapreduce-client-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.8.3.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.8.3.jar:
 /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/target/hadoop-mapreduce-client-shuffle-2.8.3.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:
 /maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:
 /maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:
 /maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:
 /maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar
 -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/site/jdiff/xml -apiname Apache Hadoop MapReduce JobClient 2.8.3 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+
+</api>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/38] hadoop git commit: HADOOP-15086. NativeAzureFileSystem file rename is not atomic. Contributed by Thomas Marquardt

Posted by ae...@apache.org.
HADOOP-15086. NativeAzureFileSystem file rename is not atomic.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52babbb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52babbb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52babbb4

Branch: refs/heads/HDFS-7240
Commit: 52babbb4a0e3c89f2025bf6e9a1b51a96e8f8fb0
Parents: 76e664e
Author: Steve Loughran <st...@apache.org>
Authored: Fri Dec 22 11:39:55 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Dec 22 11:39:55 2017 +0000

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    | 16 +++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 25 +++++--
 .../fs/azure/NativeAzureFileSystemHelper.java   | 18 +++++
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  4 ++
 .../fs/azure/SecureStorageInterfaceImpl.java    |  8 ++-
 .../hadoop/fs/azure/StorageInterface.java       |  2 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |  8 ++-
 .../azure/ITestNativeAzureFileSystemLive.java   | 72 ++++++++++++++++++++
 .../hadoop/fs/azure/MockStorageInterface.java   |  9 ++-
 9 files changed, 145 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
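
The essence of the fix: the blob copy backing a file rename becomes conditional on the destination not existing, so two concurrent renames to the same destination can no longer both succeed. A condensed sketch of the pattern the patch applies (calls as used in the diffs below; error handling abbreviated):

    // Ask the storage service to reject the copy if the destination
    // blob already exists (surfaces as HTTP 409, BlobAlreadyExists).
    AccessCondition dstMustNotExist =
        AccessCondition.generateIfNotExistsCondition();
    try {
      dstBlob.startCopy(srcBlob.getQualifiedUri(), null /* src condition */,
          dstMustNotExist, options, opContext);
    } catch (StorageException e) {
      if (e.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT
          && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS
              .equals(e.getErrorCode())) {
        // Another writer won the race; reported as rename() == false.
      } else {
        throw e;
      }
    }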


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index f1031b4..9396a51 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2605,12 +2605,18 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   @Override
   public void rename(String srcKey, String dstKey) throws IOException {
-    rename(srcKey, dstKey, false, null);
+    rename(srcKey, dstKey, false, null, true);
   }
 
   @Override
   public void rename(String srcKey, String dstKey, boolean acquireLease,
-      SelfRenewingLease existingLease) throws IOException {
+                     SelfRenewingLease existingLease) throws IOException {
+    rename(srcKey, dstKey, acquireLease, existingLease, true);
+  }
+
+  @Override
+  public void rename(String srcKey, String dstKey, boolean acquireLease,
+      SelfRenewingLease existingLease, boolean overwriteDestination) throws IOException {
 
     LOG.debug("Moving {} to {}", srcKey, dstKey);
 
@@ -2672,7 +2678,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       // a more intensive exponential retry policy when the cluster is getting
       // throttled.
       try {
-        dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
+        dstBlob.startCopyFromBlob(srcBlob, null,
+            getInstrumentedContext(), overwriteDestination);
       } catch (StorageException se) {
         if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
           int copyBlobMinBackoff = sessionConfiguration.getInt(
@@ -2695,7 +2702,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           options.setRetryPolicyFactory(new RetryExponentialRetry(
             copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
             copyBlobMaxRetries));
-          dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
+          dstBlob.startCopyFromBlob(srcBlob, options,
+              getInstrumentedContext(), overwriteDestination);
         } else {
           throw se;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 85a46ea..3d44b20 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -3269,16 +3269,27 @@ public class NativeAzureFileSystem extends FileSystem {
     } else if (!srcMetadata.isDir()) {
       LOG.debug("Source {} found as a file, renaming.", src);
       try {
-        store.rename(srcKey, dstKey);
+        // HADOOP-15086 - file rename must ensure that the destination does
+        // not exist.  The fix is targeted to this call only to avoid
+        // regressions.  Other call sites are attempting to rename temporary
+        // files, redo a failed rename operation, or rename a directory
+        // recursively; for these cases the destination may exist.
+        store.rename(srcKey, dstKey, false, null,
+            false);
       } catch(IOException ex) {
-
         Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
 
-        if (innerException instanceof StorageException
-            && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
-
-          LOG.debug("BlobNotFoundException encountered. Failing rename", src);
-          return false;
+        if (innerException instanceof StorageException) {
+          if (NativeAzureFileSystemHelper.isFileNotFoundException(
+              (StorageException) innerException)) {
+            LOG.debug("BlobNotFoundException encountered. Failing rename", src);
+            return false;
+          }
+          if (NativeAzureFileSystemHelper.isBlobAlreadyExistsConflict(
+              (StorageException) innerException)) {
+            LOG.debug("Destination BlobAlreadyExists. Failing rename", src);
+            return false;
+          }
         }
 
         throw ex;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 57af1f8..754f343 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.azure;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.net.HttpURLConnection;
 import java.util.Map;
 
 import com.google.common.base.Preconditions;
@@ -96,6 +97,23 @@ final class NativeAzureFileSystemHelper {
   }
 
   /*
+   * Determines if a conditional request failed because the blob already
+   * exists.
+   *
+   * @param e - the storage exception thrown by the failed operation.
+   *
+   * @return true if a conditional request failed because the blob already
+   * exists; otherwise, returns false.
+   */
+  static boolean isBlobAlreadyExistsConflict(StorageException e) {
+    if (e.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT
+        && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(e.getErrorCode())) {
+      return true;
+    }
+    return false;
+  }
+
+  /*
    * Helper method that logs stack traces from all live threads.
    */
   public static void logAllLiveStackTraces() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
index 57a729d..b67ab1b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
@@ -91,6 +91,10 @@ interface NativeFileSystemStore {
   void rename(String srcKey, String dstKey, boolean acquireLease, SelfRenewingLease existingLease)
       throws IOException;
 
+  void rename(String srcKey, String dstKey, boolean acquireLease,
+              SelfRenewingLease existingLease, boolean overwriteDestination)
+      throws IOException;
+
   /**
    * Delete all keys with the given prefix. Used for testing.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
index 7c2722e..0f54249 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
@@ -503,10 +503,14 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext)
+        OperationContext opContext, boolean overwriteDestination)
             throws StorageException, URISyntaxException {
+      AccessCondition dstAccessCondition =
+          overwriteDestination
+              ? null
+              : AccessCondition.generateIfNotExistsCondition();
       getBlob().startCopy(sourceBlob.getBlob().getQualifiedUri(),
-          null, null, options, opContext);
+          null, dstAccessCondition, options, opContext);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index e03d731..dbb3849 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -406,7 +406,7 @@ abstract class StorageInterface {
      *
      */
     public abstract void startCopyFromBlob(CloudBlobWrapper sourceBlob,
-        BlobRequestOptions options, OperationContext opContext)
+        BlobRequestOptions options, OperationContext opContext, boolean overwriteDestination)
         throws StorageException, URISyntaxException;
     
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index 41a4dbb..e600f9e 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -425,10 +425,14 @@ class StorageInterfaceImpl extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext)
+        OperationContext opContext, boolean overwriteDestination)
             throws StorageException, URISyntaxException {
+      AccessCondition dstAccessCondition =
+          overwriteDestination
+              ? null
+              : AccessCondition.generateIfNotExistsCondition();
       getBlob().startCopy(sourceBlob.getBlob().getQualifiedUri(),
-          null, null, options, opContext);
+          null, dstAccessCondition, options, opContext);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
index f969968..9033674 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
@@ -18,8 +18,16 @@
 
 package org.apache.hadoop.fs.azure;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +48,70 @@ public class ITestNativeAzureFileSystemLive extends
     return AzureBlobStorageTestAccount.create();
   }
 
+  /**
+   * Tests the rename file operation to ensure that when there are multiple
+   * attempts to rename a file to the same destination, only one rename
+   * operation is successful (HADOOP-15086).
+   */
+  @Test
+  public void testMultipleRenameFileOperationsToSameDestination()
+      throws IOException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+    final AtomicInteger successfulRenameCount = new AtomicInteger(0);
+    final AtomicReference<IOException> unexpectedError = new AtomicReference<IOException>();
+    final Path dest = path("dest");
+
+    // Run 10 threads to rename multiple files to the same target path
+    List<Thread> threads = new ArrayList<>();
+
+    for (int i = 0; i < 10; i++) {
+      final int threadNumber = i;
+      Path src = path("test" + threadNumber);
+      threads.add(new Thread(() -> {
+        try {
+          latch.await(Long.MAX_VALUE, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+        }
+        try {
+          try (OutputStream output = fs.create(src)) {
+            output.write(("Source file number " + threadNumber).getBytes());
+          }
+
+          if (fs.rename(src, dest)) {
+            LOG.info("rename succeeded for thread " + threadNumber);
+            successfulRenameCount.incrementAndGet();
+          }
+        } catch (IOException e) {
+          unexpectedError.compareAndSet(null, e);
+          ContractTestUtils.fail("Exception unexpected", e);
+        }
+      }));
+    }
+
+    // Start each thread
+    threads.forEach(t -> t.start());
+
+    // Wait for threads to start and wait on latch
+    Thread.sleep(2000);
+
+    // Now start to rename
+    latch.countDown();
+
+    // Wait for all threads to complete
+    threads.forEach(t -> {
+      try {
+        t.join();
+      } catch (InterruptedException e) {
+      }
+    });
+
+    if (unexpectedError.get() != null) {
+      throw unexpectedError.get();
+    }
+    assertEquals(1, successfulRenameCount.get());
+    LOG.info("Success, only one rename operation succeeded!");
+  }
+
   @Test
   public void testLazyRenamePendingCanOverwriteExistingFile()
     throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index e0ae7b4..d5f6437 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -425,7 +425,14 @@ public class MockStorageInterface extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext) throws StorageException, URISyntaxException {
+        OperationContext opContext, boolean overwriteDestination) throws StorageException, URISyntaxException {
+      if (!overwriteDestination && backingStore.exists(convertUriToDecodedString(uri))) {
+        throw new StorageException("BlobAlreadyExists",
+            "The blob already exists.",
+            HttpURLConnection.HTTP_CONFLICT,
+            null,
+            null);
+      }
       backingStore.copy(convertUriToDecodedString(sourceBlob.getUri()), convertUriToDecodedString(uri));
       //TODO: set the backingStore.properties.CopyState and
       //      update azureNativeFileSystemStore.waitForCopyToComplete


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/38] hadoop git commit: HADOOP-15149. CryptoOutputStream should implement StreamCapabilities.

Posted by ae...@apache.org.
HADOOP-15149. CryptoOutputStream should implement StreamCapabilities.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81127616
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81127616
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81127616

Branch: refs/heads/HDFS-7240
Commit: 81127616c571b7cd25686e60a1105f96ca0626b7
Parents: b82049b
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Dec 29 13:40:42 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Dec 29 13:41:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/crypto/CryptoOutputStream.java       | 11 ++++-
 .../hadoop/crypto/CryptoStreamsTestBase.java    |  4 +-
 .../apache/hadoop/crypto/TestCryptoStreams.java | 47 +++++++++++++++++++-
 3 files changed, 57 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 9fb0ff6..2f347c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -26,6 +26,7 @@ import java.security.GeneralSecurityException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.fs.Syncable;
 
 import com.google.common.base.Preconditions;
@@ -47,7 +48,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
-    Syncable, CanSetDropBehind {
+    Syncable, CanSetDropBehind, StreamCapabilities {
   private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
@@ -304,4 +305,12 @@ public class CryptoOutputStream extends FilterOutputStream implements
     CryptoStreamUtils.freeDB(inBuffer);
     CryptoStreamUtils.freeDB(outBuffer);
   }
+
+  @Override
+  public boolean hasCapability(String capability) {
+    if (out instanceof StreamCapabilities) {
+      return ((StreamCapabilities) out).hasCapability(capability);
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index 259383d..a0eb105 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -50,9 +50,9 @@ public abstract class CryptoStreamsTestBase {
       CryptoStreamsTestBase.class);
 
   protected static CryptoCodec codec;
-  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+  protected static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
     0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
-  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+  protected static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
     0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
   
   protected static final int count = 10000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index 027ac93..2172d8a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -42,6 +42,10 @@ import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestCryptoStreams extends CryptoStreamsTestBase {
   /**
@@ -91,7 +95,7 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
   }
   
   private class FakeOutputStream extends OutputStream 
-      implements Syncable, CanSetDropBehind{
+      implements Syncable, CanSetDropBehind, StreamCapabilities{
     private final byte[] oneByteBuf = new byte[1];
     private final DataOutputBuffer out;
     private boolean closed;
@@ -153,7 +157,19 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
       checkStream();
       flush();
     }
-    
+
+    @Override
+    public boolean hasCapability(String capability) {
+      switch (capability.toLowerCase()) {
+      case StreamCapabilities.HFLUSH:
+      case StreamCapabilities.HSYNC:
+      case StreamCapabilities.DROPBEHIND:
+        return true;
+      default:
+        return false;
+      }
+    }
+
     private void checkStream() throws IOException {
       if (closed) {
         throw new IOException("Stream is closed!");
@@ -393,4 +409,31 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
       return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
     }
   }
+
+  /**
+   * This tests {@link StreamCapabilities#hasCapability(String)} for
+   * the underlying streams.
+   */
+  @Test(timeout = 120000)
+  public void testHasCapability() throws Exception {
+    // verify hasCapability returns what FakeOutputStream is set up for
+    CryptoOutputStream cos =
+        (CryptoOutputStream) getOutputStream(defaultBufferSize, key, iv);
+    assertTrue(cos instanceof StreamCapabilities);
+    assertTrue(cos.hasCapability(StreamCapabilities.HFLUSH));
+    assertTrue(cos.hasCapability(StreamCapabilities.HSYNC));
+    assertTrue(cos.hasCapability(StreamCapabilities.DROPBEHIND));
+    assertFalse(cos.hasCapability(StreamCapabilities.READAHEAD));
+    assertFalse(cos.hasCapability(StreamCapabilities.UNBUFFER));
+
+    // verify hasCapability for input stream
+    CryptoInputStream cis =
+        (CryptoInputStream) getInputStream(defaultBufferSize, key, iv);
+    assertTrue(cis instanceof StreamCapabilities);
+    assertTrue(cis.hasCapability(StreamCapabilities.DROPBEHIND));
+    assertTrue(cis.hasCapability(StreamCapabilities.READAHEAD));
+    assertTrue(cis.hasCapability(StreamCapabilities.UNBUFFER));
+    assertFalse(cis.hasCapability(StreamCapabilities.HFLUSH));
+    assertFalse(cis.hasCapability(StreamCapabilities.HSYNC));
+  }
 }

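As a usage note (not from the patch): with CryptoOutputStream now passing StreamCapabilities through, callers can probe an encrypted stream the same way as a plain one before relying on durability. A small sketch; fs and path are placeholders:

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.StreamCapabilities;

FSDataOutputStream out = fs.create(path);          // may wrap a CryptoOutputStream
if (out.hasCapability(StreamCapabilities.HSYNC)) {
  out.hsync();   // the answer now reflects the underlying stream, not a blanket false
} else {
  out.flush();   // best-effort fallback when durable sync is unsupported
}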

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[21/38] hadoop git commit: HDFS-12949. Fix findbugs warning in ImageWriter.java.

Posted by ae...@apache.org.
HDFS-12949. Fix findbugs warning in ImageWriter.java.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ab632ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ab632ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ab632ba

Branch: refs/heads/HDFS-7240
Commit: 5ab632baf52f0ecc737845051b382f68bf1385bb
Parents: 382215c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Dec 21 10:04:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Dec 21 10:04:34 2017 +0900

----------------------------------------------------------------------
 hadoop-tools/hadoop-fs2img/pom.xml | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ab632ba/hadoop-tools/hadoop-fs2img/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml
index 2e3e66a..1ae17dc 100644
--- a/hadoop-tools/hadoop-fs2img/pom.xml
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -87,6 +87,16 @@
          </archive>
         </configuration>
        </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <findbugsXmlOutput>true</findbugsXmlOutput>
+          <xmlOutput>true</xmlOutput>
+          <excludeFilterFile>${basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+          <effort>Max</effort>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

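For reference (illustrative, not the actual Hadoop filter): the excludeFilterFile wired up above points at dev-support/findbugs-exclude.xml, which uses the standard findbugs filter format; a hypothetical entry suppressing one detector for one class looks like:

<FindBugsFilter>
  <Match>
    <!-- hypothetical entry: class and pattern chosen for illustration -->
    <Class name="org.apache.hadoop.hdfs.server.namenode.ImageWriter"/>
    <Bug pattern="OS_OPEN_STREAM"/>
  </Match>
</FindBugsFilter>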

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/38] hadoop git commit: HADOOP-14965. S3a input stream "normal" fadvise mode to be adaptive

Posted by ae...@apache.org.
HADOOP-14965. S3a input stream "normal" fadvise mode to be adaptive


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ba491ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ba491ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ba491ff

Branch: refs/heads/HDFS-7240
Commit: 1ba491ff907fc5d2618add980734a3534e2be098
Parents: 13ad747
Author: Steve Loughran <st...@apache.org>
Authored: Wed Dec 20 18:25:33 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Dec 20 18:25:33 2017 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/s3a/S3AInputStream.java    | 28 +++++++++++++++++---
 .../hadoop/fs/s3a/S3AInstrumentation.java       | 13 +++++++++
 .../src/site/markdown/tools/hadoop-aws/index.md | 13 ++++++++-
 .../scale/ITestS3AInputStreamPerformance.java   |  6 ++++-
 4 files changed, 54 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 7e6d640..0074143 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -83,7 +83,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
   private final S3AInstrumentation.InputStreamStatistics streamStatistics;
   private S3AEncryptionMethods serverSideEncryptionAlgorithm;
   private String serverSideEncryptionKey;
-  private final S3AInputPolicy inputPolicy;
+  private S3AInputPolicy inputPolicy;
   private long readahead = Constants.DEFAULT_READAHEAD_RANGE;
   private final Invoker invoker;
 
@@ -139,12 +139,22 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     this.serverSideEncryptionAlgorithm =
         s3Attributes.getServerSideEncryptionAlgorithm();
     this.serverSideEncryptionKey = s3Attributes.getServerSideEncryptionKey();
-    this.inputPolicy = inputPolicy;
+    setInputPolicy(inputPolicy);
     setReadahead(readahead);
     this.invoker = invoker;
   }
 
   /**
+   * Set/update the input policy of the stream.
+   * This updates the stream statistics.
+   * @param inputPolicy new input policy.
+   */
+  private void setInputPolicy(S3AInputPolicy inputPolicy) {
+    this.inputPolicy = inputPolicy;
+    streamStatistics.inputPolicySet(inputPolicy.ordinal());
+  }
+
+  /**
    * Opens up the stream at specified target position and for given length.
    *
    * @param reason reason for reopen
@@ -162,8 +172,9 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
         length, contentLength, readahead);
     LOG.debug("reopen({}) for {} range[{}-{}], length={}," +
-        " streamPosition={}, nextReadPosition={}",
-        uri, reason, targetPos, contentRangeFinish, length,  pos, nextReadPos);
+        " streamPosition={}, nextReadPosition={}, policy={}",
+        uri, reason, targetPos, contentRangeFinish, length,  pos, nextReadPos,
+        inputPolicy);
 
     long opencount = streamStatistics.streamOpened();
     GetObjectRequest request = new GetObjectRequest(bucket, key)
@@ -274,6 +285,12 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     } else if (diff < 0) {
       // backwards seek
       streamStatistics.seekBackwards(diff);
+      // if the stream is in "Normal" mode, switch to random IO at this
+      // point, as it is indicative of columnar format IO
+      if (inputPolicy.equals(S3AInputPolicy.Normal)) {
+        LOG.info("Switching to Random IO seek policy");
+        setInputPolicy(S3AInputPolicy.Random);
+      }
     } else {
       // targetPos == pos
       if (remainingInCurrentRequest() > 0) {
@@ -443,6 +460,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       try {
         // close or abort the stream
         closeStream("close() operation", this.contentRangeFinish, false);
+        LOG.debug("Statistics of stream {}\n{}", key, streamStatistics);
         // this is actually a no-op
         super.close();
       } finally {
@@ -713,6 +731,8 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       break;
 
     case Normal:
+      // normal is considered sequential until a backwards seek switches
+      // it to 'Random'
     default:
       rangeLimit = contentLength;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index 0fbcc00..d843347 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -667,6 +667,8 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
     public long readsIncomplete;
     public long bytesReadInClose;
     public long bytesDiscardedInAbort;
+    public long policySetCount;
+    public long inputPolicy;
 
     private InputStreamStatistics() {
     }
@@ -783,6 +785,15 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
     }
 
     /**
+     * The input policy has been switched.
+     * @param updatedPolicy enum value of new policy.
+     */
+    public void inputPolicySet(int updatedPolicy) {
+      policySetCount++;
+      inputPolicy = updatedPolicy;
+    }
+
+    /**
      * String operator describes all the current statistics.
      * <b>Important: there are no guarantees as to the stability
      * of this value.</b>
@@ -813,6 +824,8 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
       sb.append(", ReadsIncomplete=").append(readsIncomplete);
       sb.append(", BytesReadInClose=").append(bytesReadInClose);
       sb.append(", BytesDiscardedInAbort=").append(bytesDiscardedInAbort);
+      sb.append(", InputPolicy=").append(inputPolicy);
+      sb.append(", InputPolicySetCount=").append(policySetCount);
       sb.append('}');
       return sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index fbcd54a..7eebf5c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1553,7 +1553,18 @@ backward seeks.
 
 *"normal" (default)*
 
-This is currently the same as "sequential", though it may evolve in future.
+The "Normal" policy starts off reading a file  in "sequential" mode,
+but if the caller seeks backwards in the stream, it switches from
+sequential to "random".
+
+This policy effectively recognizes the initial read pattern of columnar
+storage formats (e.g. Apache ORC and Apache Parquet), which seek to the end
+of a file, read in index data and then seek backwards to selectively read
+columns. The first seeks may be be expensive compared to the random policy,
+however the overall process is much less expensive than either sequentially
+reading through a file with the "random" policy, or reading columnar data
+with the "sequential" policy. When the exact format/recommended
+seek policy of data are known in advance, this policy
 
 *"random"*
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
index 83ab210..efd96c4 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
@@ -427,7 +427,11 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
     long expectedOpenCount = RANDOM_IO_SEQUENCE.length;
     executeRandomIO(S3AInputPolicy.Normal, expectedOpenCount);
     assertEquals("streams aborted in " + streamStatistics,
-        4, streamStatistics.aborted);
+        1, streamStatistics.aborted);
+    assertEquals("policy changes in " + streamStatistics,
+        2, streamStatistics.policySetCount);
+    assertEquals("input policy in " + streamStatistics,
+        S3AInputPolicy.Random.ordinal(), streamStatistics.inputPolicy);
   }
 
   /**

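Usage note (not part of the patch): the policy is still selected through the existing fs.s3a.experimental.input.fadvise option, so callers that know their access pattern in advance can keep pinning it rather than relying on the adaptive default. A short sketch; the bucket URI is a placeholder:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

Configuration conf = new Configuration();
// "normal" now adapts after the first backwards seek; "random" or
// "sequential" pin the behavior up front.
conf.set("fs.s3a.experimental.input.fadvise", "random");
FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);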

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/38] hadoop git commit: HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)

Posted by ae...@apache.org.
HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e3e1b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e3e1b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e3e1b8c

Branch: refs/heads/HDFS-7240
Commit: 6e3e1b8cde737e4c03b0f5279cab0239e7069a72
Parents: a55884c
Author: Lei Xu <le...@apache.org>
Authored: Fri Dec 29 12:21:57 2017 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Fri Dec 29 12:21:57 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 71 +++++++++++++-------
 1 file changed, 46 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e3e1b8c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3f2fb33..906a940 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -33,10 +33,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -53,11 +53,11 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
+import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
-import org.apache.hadoop.util.StringUtils;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
@@ -186,28 +186,49 @@ public class INodeFile extends INodeWithAdditionalFields
      * Construct block layout redundancy based on the given BlockType,
      * replication factor and EC PolicyID.
      */
-    static long getBlockLayoutRedundancy(final BlockType blockType,
-        final Short replication, final Byte erasureCodingPolicyID) {
-      long layoutRedundancy = 0;
-      if (blockType == STRIPED) {
-        Preconditions.checkArgument(replication == null &&
-            erasureCodingPolicyID != null);
-        Preconditions.checkArgument(ErasureCodingPolicyManager.getInstance()
-                .getByID(erasureCodingPolicyID) != null,
-            "Could not find EC policy with ID 0x" + StringUtils
-                .byteToHexString(erasureCodingPolicyID));
+    static long getBlockLayoutRedundancy(BlockType blockType,
+        Short replication, Byte erasureCodingPolicyID) {
+      if (null == erasureCodingPolicyID) {
+        erasureCodingPolicyID = REPLICATION_POLICY_ID;
+      }
+      long layoutRedundancy = 0xFF & erasureCodingPolicyID;
+      switch (blockType) {
+      case STRIPED:
+        if (replication != null) {
+          throw new IllegalArgumentException(
+              "Illegal replication for STRIPED block type");
+        }
+        if (erasureCodingPolicyID == REPLICATION_POLICY_ID) {
+          throw new IllegalArgumentException(
+              "Illegal REPLICATION policy for STRIPED block type");
+        }
+        if (null == ErasureCodingPolicyManager.getInstance()
+            .getByID(erasureCodingPolicyID)) {
+          throw new IllegalArgumentException(String.format(
+                "Could not find EC policy with ID 0x%02x",
+                erasureCodingPolicyID));
+        }
+
+        // valid parameters for STRIPED
         layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
-        // Following bitwise OR with signed byte erasureCodingPolicyID is safe
-        // as the PolicyID can never be in negative.
-        layoutRedundancy |= erasureCodingPolicyID;
-      } else {
-        Preconditions.checkArgument(erasureCodingPolicyID == null ||
-                erasureCodingPolicyID ==
-                    ErasureCodeConstants.REPLICATION_POLICY_ID);
-        Preconditions.checkArgument(replication != null && replication >= 0 &&
-            replication <= MAX_REDUNDANCY,
-            "Invalid replication value " + replication);
+        break;
+      case CONTIGUOUS:
+        if (erasureCodingPolicyID != REPLICATION_POLICY_ID) {
+          throw new IllegalArgumentException(String.format(
+              "Illegal EC policy 0x%02x for CONTIGUOUS block type",
+              erasureCodingPolicyID));
+        }
+        if (null == replication ||
+            replication < 0 || replication > MAX_REDUNDANCY) {
+          throw new IllegalArgumentException("Invalid replication value "
+              + replication);
+        }
+
+        // valid parameters for CONTIGUOUS
         layoutRedundancy |= replication;
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown blockType: " + blockType);
       }
       return layoutRedundancy;
     }
@@ -599,7 +620,7 @@ public class INodeFile extends INodeWithAdditionalFields
     if (isStriped()) {
       return HeaderFormat.getECPolicyID(header);
     }
-    return ErasureCodeConstants.REPLICATION_POLICY_ID;
+    return REPLICATION_POLICY_ID;
   }
 
   /**

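To make the tightened contract concrete, a sketch of the accepted and rejected argument combinations (illustrative only: HeaderFormat is internal to INodeFile, and ecPolicyId stands in for a real Byte policy ID):

// STRIPED: EC policy ID required, replication must be null.
long striped = HeaderFormat.getBlockLayoutRedundancy(
    BlockType.STRIPED, null, ecPolicyId);

// CONTIGUOUS: replication required; a null policy ID is normalized to
// REPLICATION_POLICY_ID before validation.
long contiguous = HeaderFormat.getBlockLayoutRedundancy(
    BlockType.CONTIGUOUS, (short) 3, null);

// Mixed arguments now fail fast with IllegalArgumentException rather
// than the Preconditions checks findbugs flagged.
HeaderFormat.getBlockLayoutRedundancy(BlockType.STRIPED, (short) 3, ecPolicyId);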

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/38] hadoop git commit: HADOOP-15133. [JDK9] Ignore com.sun.javadoc.* and com.sun.tools.* in animal-sniffer-maven-plugin to compile with Java 9.

Posted by ae...@apache.org.
HADOOP-15133. [JDK9] Ignore com.sun.javadoc.* and com.sun.tools.* in animal-sniffer-maven-plugin to compile with Java 9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2d8f4ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2d8f4ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2d8f4ae

Branch: refs/heads/HDFS-7240
Commit: d2d8f4aeb3e214d1a96eeaf96bbe1e9301824ccd
Parents: 5ab632b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Dec 21 11:58:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Dec 21 11:58:34 2017 +0900

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2d8f4ae/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index efc8c2d..3c49182 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1569,6 +1569,8 @@
             <ignore>sun.misc.*</ignore>
             <ignore>sun.net.*</ignore>
             <ignore>sun.nio.ch.*</ignore>
+            <ignore>com.sun.javadoc.*</ignore>
+            <ignore>com.sun.tools.*</ignore>
           </ignores>
         </configuration>
       </plugin>

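Background (not in the patch): the two new ignores cover the legacy doclet API that Hadoop's jdiff tooling compiles against; code of roughly this shape would otherwise trip the signature check. A hypothetical example:

// Uses the old doclet API, which is not part of the Java 9 API
// signatures that animal-sniffer checks against.
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.RootDoc;

public class ExampleDoclet {
  public static boolean start(RootDoc root) {      // classic doclet entry point
    return true;
  }
  public static boolean validOptions(String[][] options,
      DocErrorReporter reporter) {
    return true;
  }
}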

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
new file mode 100644
index 0000000..6826c25
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
@@ -0,0 +1,2665 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:29:28 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.2
 6.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annota
 tions-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/build/source/hadoop-yarn-project/h
 adoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson
 -xc-1.9.13.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1
 /jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Common 2.8.3 -->
+<package name="org.apache.hadoop.yarn">
+  <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <method name="getTotalLogFileSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTotalLogFileSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logSize" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <class name="ContainerRollingLogAppender" extends="org.apache.log4j.RollingFileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerRollingLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+  <class name="YarnUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="YarnUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.Thread"/>
+      <param name="e" type="java.lang.Throwable"/>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling 
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ In the main entry point.  It is intended to try and cleanly shut down
+ programs using the Yarn Event framework.
+ 
+ Note: Right now it only will shut down the program if a Error is caught, but
+ not any other exception.  Anything else is just logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.api">
+</package>
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.AHSProxy -->
+  <class name="AHSProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ahsAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.AHSProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <class name="ClientRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Client protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the token service name to be used for RMDelegationToken. Depending
+ on whether HA is enabled or not, this method generates the appropriate
+ service name as a comma-separated list of service addresses.
+
+ @param conf Configuration corresponding to the cluster we need the
+             RMDelegationToken for
+ @return - Service name for RMDelegationToken]]>
+      </doc>
+    </method>
+    <method name="getAMRMTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="address" type="java.lang.String"/>
+      <param name="defaultAddr" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.NMProxy -->
+  <class name="NMProxy" extends="org.apache.hadoop.yarn.client.ServerProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
+  <class name="RMProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMProxy"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated
+ This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.
+
+ @param conf Configuration to generate retry policy
+ @param protocol Protocol for the proxy
+ @param rmAddress Address of the ResourceManager
+ @param <T> Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ServerProxy -->
+  <class name="ServerProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServerProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxWaitTimeStr" type="java.lang.String"/>
+      <param name="defMaxWaitTime" type="long"/>
+      <param name="connectRetryIntervalStr" type="java.lang.String"/>
+      <param name="defRetryInterval" type="long"/>
+    </method>
+    <method name="createRetriableProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+      <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ServerProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <class name="TimelineClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="TimelineClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineClient" return="org.apache.hadoop.yarn.client.api.TimelineClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a timeline client. The current UGI when the user initialize the
+ client will be used to do the put and the delegation token operations. The
+ current user may use {@link UserGroupInformation#doAs} another user to
+ construct and initialize a timeline client if the following operations are
+ supposed to be conducted by that user.
+
+ @return a timeline client]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+ </p>
+ 
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="groupId" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"/>
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param appAttemptId {@link ApplicationAttemptId}
+ @param groupId {@link TimelineEntityGroupId}
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+ </p>
+ 
+ @param domain
+          an {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param domain
+          an {@link TimelineDomain} object
+ @param appAttemptId {@link ApplicationAttemptId}
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+ </p>
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+         timeline server
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Renew a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Cancel a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that can be used to post some information in terms of a
+ number of conceptual entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineClient -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+</package>
+<package name="org.apache.hadoop.yarn.event">
+  <!-- start class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <class name="AbstractEvent" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Event"/>
+    <constructor name="AbstractEvent" type="TYPE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractEvent" type="TYPE, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Parent class of all the events. All events extend this class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <!-- start class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <class name="AsyncDispatcher" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Dispatcher"/>
+    <constructor name="AsyncDispatcher"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.util.concurrent.BlockingQueue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="setDrainEventsOnStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="dispatch"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.event.Event"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isEventThreadWaiting" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isDrained" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="eventDispatchers" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Dispatches {@link Event}s in a separate thread. Currently only a single
+ thread does so. Potentially there could be multiple channels for each event
+ type class, and a thread pool could be used to dispatch the events.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <interface name="Dispatcher"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <field name="DISPATCHER_EXIT_ON_ERROR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPATCHER_EXIT_ON_ERROR" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Event Dispatcher interface. It dispatches events to registered 
+ event handlers based on event types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Event -->
+  <interface name="Event"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface defining the events API.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Event -->
+  <!-- start interface org.apache.hadoop.yarn.event.EventHandler -->
+  <interface name="EventHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="handle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="T"/>
+    </method>
+    <doc>
+    <![CDATA[Interface for handling events of type T
+
+ @param <T> parameterized event of type T]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.EventHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.factories">
+</package>
+<package name="org.apache.hadoop.yarn.factory.providers">
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <class name="AggregatedLogFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <class name="AggregatedLogFormat.LogKey" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="AggregatedLogFormat.LogKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+  <class name="AggregatedLogFormat.LogReader" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat.LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+ 
+ @return the application owner.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+ 
+ @return a map of the Application ACLs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="next" return="java.io.DataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+ 
+ @param key
+ @return the valueStream if there are more keys or null otherwise.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} to retrieve the logs
+ of all types for a single container.
+ 
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} to retrieve the logs
+ of all types for a single container.
+ 
+ @param valueStream
+ @param out
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readContainerLogsForALogType" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <param name="logType" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} to retrieve the logs
+ of the specified types for a single container.
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @param logType
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
+</package>
+<package name="org.apache.hadoop.yarn.security">
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <class name="AMRMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[AMRMTokenIdentifier is the TokenIdentifier to be used by
+ ApplicationMasters to authenticate to the ResourceManager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <class name="AMRMTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="AMRMTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <class name="ContainerManagerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerManagerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <class name="ContainerTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="Use one of the other constructors instead.">
+      <doc>
+      <![CDATA[Creates an instance.
+
+ @param appSubmitter appSubmitter
+ @param containerID container ID
+ @param creationTime creation time
+ @param expiryTimeStamp expiry timestamp
+ @param hostName hostname
+ @param logAggregationContext log aggregation context
+ @param masterKeyId master key ID
+ @param priority priority
+ @param r resource needed by the container
+ @param rmIdentifier ResourceManager identifier
+ @deprecated Use one of the other constructors instead.]]>
+      </doc>
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, int, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getContainerID" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNmHostAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getExpiryTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMasterKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRMIdentifier" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the RMIdentifier of the RM in which containers are allocated.
+ @return RMIdentifier]]>
+      </doc>
+    </method>
+    <method name="getContainerType" return="org.apache.hadoop.yarn.server.api.ContainerType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ContainerType of the container to allocate.
+ @return ContainerType]]>
+      </doc>
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogAggregationContext" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Container version
+ @return container version]]>
+      </doc>
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the node-label-expression in the original ResourceRequest.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TokenIdentifier for a container. Encodes the {@link ContainerId}, the
+ {@link Resource} needed by the container, and the target NM's host address.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <class name="ContainerTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="ContainerTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <class name="NMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, org.apache.hadoop.yarn.api.records.NodeId, java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by the RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.NMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+  <class name="SchedulerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SchedulerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.admin">
+  <!-- start class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+  <class name="AdminSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AdminSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.client">
+  <!-- start class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <class name="BaseClientToAMTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BaseClientToAMTokenSecretManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A base {@link SecretManager} for AMs to extend in order to validate
+ Client-RM tokens issued to clients by the RM, using the underlying master
+ key shared by the RM with the AMs at their launch. All the methods are
+ called by either Hadoop RPC or YARN, so this class is strictly intended to
+ be inherited/extended and registered with Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <class name="ClientRMSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientRMSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <class name="ClientTimelineSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientTimelineSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <class name="ClientToAMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClientToAMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationAttemptID" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getClientName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ClientToAMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <class name="ClientToAMTokenSecretManager" extends="org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenSecretManager" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="retrievePassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+    </method>
+    <method name="getMasterKey" return="javax.crypto.SecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptID" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="setMasterKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+    </method>
+    <doc>
+    <![CDATA[A simple {@link SecretManager} for AMs to validate Client-RM tokens issued
+ to clients by the RM, using the underlying master key shared by the RM with
+ the AMs at their launch. All the methods are called by either Hadoop RPC or
+ YARN, so this class is strictly intended to be inherited/extended and
+ registered with Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <class name="RMDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="RMDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new delegation token identifier.
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Delegation Token Identifier that identifies the delegation tokens from the 
+ Resource Manager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <class name="RMDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="RMDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <class name="TimelineDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new timeline delegation token identifier.
+
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+  <class name="TimelineDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="TimelineDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.sharedcache">
+  <!-- start interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <interface name="SharedCacheChecksum"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="computeChecksum" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Calculate the checksum of the passed input stream.
+
+ @param in <code>InputStream</code> to be checksummed
+ @return the message digest of the input stream
+ @throws IOException]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <!-- start class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+  <class name="SharedCacheChecksumFactory" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheChecksumFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getChecksum" return="org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get a new <code>SharedCacheChecksum</code> object based on the configurable
+ algorithm implementation
+ (see <code>yarn.sharedcache.checksum.algo.impl</code>).
+
+ @return <code>SharedCacheChecksum</code> object]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+</package>
+<package name="org.apache.hadoop.yarn.state">
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <class name="InvalidStateTransitionException" extends="org.apache.hadoop.yarn.state.InvalidStateTransitonException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidStateTransitionException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The exception that occurs when an invalid state transition is attempted.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <class name="InvalidStateTransitonException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="Use {@link InvalidStateTransitionException} instead.">
+    <constructor name="InvalidStateTransitonException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCurrentState" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEvent" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[@deprecated Use {@link InvalidStateTransitionException} instead.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <!-- start interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <interface name="MultipleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition" return="STATE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ @return the postState. Post state must be one of the 
+                      valid post states registered in StateMachine.
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition. 
+ Post state is decided by Transition hook. Post state must be one of the 
+ valid post states registered in StateMachine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <!-- start interface org.apache.hadoop.yarn.state.SingleArcTransition -->
+  <interface name="SingleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ 
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition. This leads the state machine to move to
+ the post state as registered in the state machine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.SingleArcTransition -->
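
The two transition hooks above differ only in whether the post state is fixed at registration time (SingleArcTransition) or computed by the hook itself (MultipleArcTransition). A minimal SingleArcTransition sketch; the Job and JobEvent types are hypothetical placeholders, only the interface shape comes from this listing:

    import org.apache.hadoop.yarn.state.SingleArcTransition;

    // Hypothetical operand and event types, for illustration only.
    class Job { int attempts; }
    enum JobEvent { RETRY }

    // The hook may mutate the operand; the post state was fixed when this
    // transition was registered in the StateMachine.
    class RetryTransition implements SingleArcTransition<Job, JobEvent> {
      @Override
      public void transition(Job operand, JobEvent event) {
        operand.attempts++;
      }
    }
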
+  <!-- start interface org.apache.hadoop.yarn.state.StateMachine -->
+  <interface name="StateMachine"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not

<TRUNCATED>


[08/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
new file mode 100644
index 0000000..bd7e69c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
@@ -0,0 +1,38433 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:00:41 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/build/source/hadoop-common-project/hadoop-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-common-project/hadoop-common/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-net/commons-
 net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/
 asm-3.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy
 -java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/maven/org/apache/ant/ant-launcher/1.8.1/ant-launcher-1.8.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/org/apache/directory/api/api-
 i18n/1.0.0-M20/api-i18n-1.0.0-M20.jar:/maven/org/apache/directory/api/api-ldap-model/1.0.0-M20/api-ldap-model-1.0.0-M20.jar:/maven/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/maven/net/sf/ehcache/ehcache-core/2.4.4/ehcache-core-2.4.4.jar:/maven/antlr/antlr/2.7.7/antlr-2.7.7.jar:/maven/org/apache/directory/api/api-asn1-ber/1.0.0-M20/api-asn1-ber-1.0.0-M20.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar -sourcepath /build/source/hadoop-common-proje
 ct/hadoop-common/src/main/java -apidir /build/source/hadoop-common-project/hadoop-common/target/site/jdiff/xml -apiname Apache Hadoop Common 2.8.3 -->
+<package name="org.apache.hadoop">
+  <!-- start class org.apache.hadoop.HadoopIllegalArgumentException -->
+  <class name="HadoopIllegalArgumentException" extends="java.lang.IllegalArgumentException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HadoopIllegalArgumentException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message. 
+ @param message detailed message.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[Indicates that a method has been passed an illegal or invalid argument. This
+ exception is thrown instead of IllegalArgumentException to differentiate the
+ exception thrown in the Hadoop implementation from the one thrown in the JDK.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.HadoopIllegalArgumentException -->
+</package>
+<package name="org.apache.hadoop.conf">
+  <!-- start interface org.apache.hadoop.conf.Configurable -->
+  <interface name="Configurable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the configuration to be used by this object.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the configuration used by this object.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.conf.Configurable -->
+  <!-- start class org.apache.hadoop.conf.Configuration -->
+  <class name="Configuration" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Iterable"/>
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration where the behavior of reading from the default 
+ resources can be turned off.
+ 
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files. 
+ @param loadDefaults specifies whether to load from the default files]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration with the same settings cloned from another.
+ 
+ @param other the configuration from which to clone settings.]]>
+      </doc>
+    </constructor>
+    <method name="addDeprecations"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="deltas" type="org.apache.hadoop.conf.Configuration.DeprecationDelta[]"/>
+      <doc>
+      <![CDATA[Adds a set of deprecated keys to the global deprecations.
+
+ This method is lockless.  It works by means of creating a new
+ DeprecationContext based on the old one, and then atomically swapping in
+ the new context.  If someone else updated the context in between us reading
+ the old context and swapping in the new one, we try again until we win the
+ race.
+
+ @param deltas   The deprecations to add.]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers in order to add deprecation of
+ keys; attempts to call this method after loading resources once
+ would lead to <tt>UnsupportedOperationException</tt>.
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+ 
+ @param key
+ @param newKeys
+ @param customMessage
+ @deprecated use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers in order to add deprecation of
+ keys; attempts to call this method after loading resources once
+ would lead to <tt>UnsupportedOperationException</tt>.
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key
+ @param newKey
+ @param customMessage]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers in order to add deprecation of
+ keys; attempts to call this method after loading resources once
+ would lead to <tt>UnsupportedOperationException</tt>.
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKeys list of keys that take up the values of deprecated key
+ @deprecated use {@link #addDeprecation(String key, String newKey)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers in order to add deprecation of
+ keys; attempts to call this method after loading resources once
+ would lead to <tt>UnsupportedOperationException</tt>.
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKey key that takes up the value of deprecated key]]>
+      </doc>
+    </method>
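
The javadoc above recommends the bulk addDeprecations form when registering several entries; a sketch of both the registration and the aliasing effect (the key names are made up for illustration):

    import org.apache.hadoop.conf.Configuration;

    public class DeprecationExample {
      static {
        // Bulk form: one atomic swap of the deprecation context, per the
        // addDeprecations javadoc above.
        Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
            new Configuration.DeprecationDelta("my.old.key", "my.new.key"),
            new Configuration.DeprecationDelta("my.other.old.key", "my.other.new.key",
                "my.other.old.key is deprecated; use my.other.new.key")
        });
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("my.old.key", "value");            // also sets my.new.key
        System.out.println(conf.get("my.new.key")); // prints "value"
      }
    }
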
+    <method name="isDeprecated" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Checks whether the given <code>key</code> is deprecated.
+ 
+ @param key the parameter which is to be checked for deprecation
+ @return <code>true</code> if the key is deprecated and 
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setDeprecatedProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Sets all deprecated properties that are not currently set but have a
+ corresponding new property that is set. Useful for iterating the
+ properties when all deprecated properties for currently set properties
+ need to be present.]]>
+      </doc>
+    </method>
+    <method name="reloadExistingConfigurations"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload existing configuration instances.]]>
+      </doc>
+    </method>
+    <method name="addDefaultResource"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a default resource. Resources are loaded in the order of the resources 
+ added.
+ @param name file name. File should be present in the classpath.]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemPropertiesDefault"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="setRestrictSystemProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param name resource to be added, the classpath is examined for a file 
+             with that name.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param url url of the resource to be added, the local filesystem is 
+            examined directly to find the resource, without referring to 
+            the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param file file-path of resource to be added, the local filesystem is
+             examined directly to find the resource, without referring to 
+             the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ WARNING: The contents of the InputStream will be cached by this method.
+ So use this sparingly because it does increase memory consumption.
+ 
+ @param in InputStream to deserialize the object from. In will be read from
+ when a get or set is called next.  After it is read the stream will be
+ closed.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param in InputStream to deserialize the object from.
+ @param name the name of the resource because InputStream.toString is not
+ very descriptive sometimes.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param conf Configuration object from which to load properties]]>
+      </doc>
+    </method>
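
A sketch of the resource-override order these addResource variants describe: later resources win unless a property was marked final (the file names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ResourceOrderExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();    // loads default resources
        conf.addResource("my-site.xml");             // found on the classpath
        conf.addResource(new Path("/etc/app/override.xml")); // direct file path
        // override.xml wins over my-site.xml and the defaults, unless the
        // earlier definition was marked <final>true</final>.
        System.out.println(conf.get("some.example.key"));
      }
    }
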
+    <method name="reloadConfiguration"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload configuration from previously added resources.
+
+ This method will clear all the configuration read from the added 
+ resources, and final parameters. This will cause the resources to
+ be read again before accessing the values. Values that are added
+ via set methods will overlay values read from the resources.]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists. If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null.
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned. 
+ 
+ @param name the property name, will be trimmed before get value.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="setAllowNullValueProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+      <doc>
+      <![CDATA[Set Configuration to allow keys without values during setup.  Intended
+ for use during testing.
+
+ @param val If true, will allow Configuration to store keys without values]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemProps"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="onlyKeyExists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return existence of the <code>name</code> property, but only for
+ names which have no valid value, usually non-existent or commented
+ out in XML.
+
+ @param name the property name
+ @return true if the property <code>name</code> exists without value]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>null</code> if no such property exists. 
+ If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null.
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned. 
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>defaultValue</code> if no such property exists. 
+ See {@link Configuration#getTrimmed} for more details.
+ 
+ @param name          the property name.
+ @param defaultValue  the property default value.
+ @return              the value of the <code>name</code> or defaultValue
+                      if it is not set.]]>
+      </doc>
+    </method>
+    <method name="getRaw" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.If the key is 
+ deprecated, it returns the value of the first key which replaces 
+ the deprecated key and is not null.
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> property or 
+         its replacing property and null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated or there is a deprecated name associated with it,
+ it sets the value to both names. Name will be trimmed before put into
+ configuration.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <param name="source" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated, it also sets the <code>value</code> to
+ the keys that replace the deprecated key. Name will be trimmed before put
+ into configuration.
+
+ @param name property name.
+ @param value property value.
+ @param source the place that this configuration value came from 
+ (For debugging).
+ @throws IllegalArgumentException when the value or name is null.]]>
+      </doc>
+    </method>
+    <method name="unset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Unset a previously set property.]]>
+      </doc>
+    </method>
+    <method name="setIfUnset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets a property if it is currently unset.
+ @param name the property name
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code>. If the key is deprecated,
+ it returns the value of the first key which replaces the deprecated key
+ and is not null.
+ If no such property exists,
+ then <code>defaultValue</code> is returned.
+ 
+ @param name property name, will be trimmed before get value.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property 
+         doesn't exist.]]>
+      </doc>
+    </method>
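
Variable expansion and defaulting, as described in the get/getRaw/set entries above, in one small sketch (the keys are illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class GetSetExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("base.dir", "/data");
        conf.set("log.dir", "${base.dir}/logs");     // reference another key
        System.out.println(conf.get("log.dir"));     // -> /data/logs (expanded)
        System.out.println(conf.getRaw("log.dir"));  // -> ${base.dir}/logs
        System.out.println(conf.get("missing.key", "fallback")); // -> fallback
      }
    }
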
+    <method name="getInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="int"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+   
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>int</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as an <code>int</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getInts" return="int[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a set of comma-delimited
+ <code>int</code> values.
+ 
+ If no such property exists, an empty array is returned.
+ 
+ @param name property name
+ @return property value interpreted as an array of comma-delimited
+         <code>int</code> values]]>
+      </doc>
+    </method>
+    <method name="setInt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+ 
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>long</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getLongBytes" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code> or
+ human readable format. If no such property exists, the provided default
+ value is returned, or if the specified value is not a valid
+ <code>long</code> or human readable format, then an error is thrown. You
+ can use the following suffix (case insensitive): k(kilo), m(mega), g(giga),
+ t(tera), p(peta), e(exa)
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>,
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setLong"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+ 
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getFloat" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="float"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>float</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>float</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setFloat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>float</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getDouble" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="double"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>double</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>double</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>double</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setDouble"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>double</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getBoolean" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="boolean"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.  
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setBoolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+ 
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+      </doc>
+    </method>
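
The typed getters above share one pattern: return the parsed value, or the supplied default when the property is absent. A sketch, including the k/m/g size suffixes that getLongBytes accepts (the keys are illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class TypedGetterExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("io.example.buffer.size", 4096);
        conf.setBoolean("io.example.verify", true);
        conf.set("io.example.max.bytes", "64m");   // human-readable long

        int size = conf.getInt("io.example.buffer.size", 1024);
        boolean verify = conf.getBoolean("io.example.verify", false);
        long maxBytes = conf.getLongBytes("io.example.max.bytes", 0); // 67108864
        System.out.println(size + " " + verify + " " + maxBytes);
      }
    }
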
+    <method name="setBooleanIfUnset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the given property, if it is currently unset.
+ @param name property name
+ @param value new value]]>
+      </doc>
+    </method>
+    <method name="setEnum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="T"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the given type. This
+ is equivalent to <code>set(&lt;name&gt;, value.toString())</code>.
+ @param name property name
+ @param value new value]]>
+      </doc>
+    </method>
+    <method name="getEnum" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="T"/>
+      <doc>
+      <![CDATA[Return value matching this enumerated type.
+ Note that the returned value is trimmed by this method.
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists
+ @throws IllegalArgumentException If mapping is illegal for the type
+ provided]]>
+      </doc>
+    </method>
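
setEnum/getEnum round-trip an enum constant through its toString form, as the javadoc above notes; a sketch with a hypothetical enum:

    import org.apache.hadoop.conf.Configuration;

    public class EnumExample {
      // Hypothetical enum, for illustration only.
      enum Compression { NONE, GZIP, SNAPPY }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setEnum("example.compression", Compression.GZIP); // stores "GZIP"
        Compression c = conf.getEnum("example.compression", Compression.NONE);
        System.out.println(c); // -> GZIP
      }
    }
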
+    <method name="setTimeDuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Set the value of <code>name</code> to the given time duration. This
+ is equivalent to <code>set(&lt;name&gt;, value + &lt;time suffix&gt;)</code>.
+ @param name Property name
+ @param value Time duration
+ @param unit Unit of time]]>
+      </doc>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Return time duration in the given time unit. Valid units are encoded in
+ properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+ (ms), seconds (s), minutes (m), hours (h), and days (d).
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists.
+ @param unit Unit to convert the stored property, if it exists.
+ @throws NumberFormatException If the property stripped of its unit is not
+         a number]]>
+      </doc>
+    </method>
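
The time-duration pair above encodes the unit as a suffix (ns, us, ms, s, m, h, d) and converts on read; a sketch (the key is illustrative):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class TimeDurationExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setTimeDuration("example.timeout", 30, TimeUnit.SECONDS); // stores "30s"
        // Read back in a different unit; the stored suffix drives conversion.
        long ms = conf.getTimeDuration("example.timeout", 1000, TimeUnit.MILLISECONDS);
        System.out.println(ms); // -> 30000
      }
    }
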
+    <method name="getTimeDurations" return="long[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+    </method>
+    <method name="getPattern" return="java.util.regex.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Pattern</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>Pattern</code>, then <code>defaultValue</code> is returned.
+ Note that the returned value is NOT trimmed by this method.
+
+ @param name property name
+ @param defaultValue default value
+ @return property value as a compiled Pattern, or defaultValue]]>
+      </doc>
+    </method>
+    <method name="setPattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="pattern" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Set the given property to <code>Pattern</code>.
+ If the pattern is passed as null, sets the empty pattern which results in
+ further calls to getPattern(...) returning the default value.
+
+ @param name property name
+ @param pattern new value]]>
+      </doc>
+    </method>
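
A quick sketch of the Pattern accessors above; note the getPattern javadoc's warning that the value is not trimmed (the key is illustrative):

    import java.util.regex.Pattern;
    import org.apache.hadoop.conf.Configuration;

    public class PatternExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setPattern("example.include", Pattern.compile(".*\\.log$"));
        Pattern p = conf.getPattern("example.include", Pattern.compile(".*"));
        System.out.println(p.matcher("app.log").matches()); // -> true
      }
    }
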
+    <method name="getPropertySources" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Gets information about why a property was set.  Typically this is the 
+ path to the resource objects (file, URL, etc.) the property came from, but
+ it can also indicate that it was set programmatically, or because of the
+ command line.
+
+ @param name - The property name to get the source of.
+ @return null - If the property or its source wasn't found. Otherwise, 
+ returns a list of the sources of the resource.  The older sources are
+ the first ones in the list.  So for example if a configuration is set from
+ the command line, and then written out to a file that is read back in the
+ first entry would indicate that it was set from the command line, while
+ the second one would indicate the file that the new configuration was read
+ in from.]]>
+      </doc>
+    </method>
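
getPropertySources is easiest to see with the three-argument set above, which records a source label; a sketch (key and label are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;

    public class SourceExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("example.key", "v", "command line");  // third arg = source label
        // Oldest source first, per the javadoc above.
        System.out.println(Arrays.toString(conf.getPropertySources("example.key")));
      }
    }
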
+    <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+      </doc>
+    </method>
+    <method name="getStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s.  
+ If no such property is specified then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+ 
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified then <code>null</code> is returned.
+ 
+ @param name property name.
+ @return property value as an array of <code>String</code>s, 
+         or <code>null</code>.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified then the default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s, trimmed of the leading and trailing whitespace.  
+ If no such property is specified then an empty <code>Collection</code> is returned.
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s, or empty <code>Collection</code>]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified then an empty array is returned.
+ 
+ @param name property name.
+ @return property value as an array of trimmed <code>String</code>s, 
+         or empty array.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified then the default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of trimmed <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
+    <method name="setStrings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="values" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set the array of string values for the <code>name</code> property
+ as comma delimited values.
+ 
+ @param name property name.
+ @param values The values]]>
+      </doc>
+    </method>
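
The difference between getStrings and getTrimmedStrings above is just whitespace handling around the comma-delimited values; a sketch (hosts are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;

    public class StringsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setStrings("example.hosts", "a.example.com", " b.example.com ");
        // getStrings keeps the stray whitespace; getTrimmedStrings removes it.
        System.out.println(Arrays.toString(conf.getStrings("example.hosts")));
        System.out.println(Arrays.toString(conf.getTrimmedStrings("example.hosts")));
      }
    }
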
+    <method name="getPassword" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the value for a known password configuration element.
+ In order to enable the elimination of clear text passwords in config,
+ this method attempts to resolve the property name as an alias through
+ the CredentialProvider API and conditionally falls back to config.
+ @param name property name
+ @return password]]>
+      </doc>
+    </method>
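
A sketch of the getPassword flow described above; with no credential provider configured it exercises only the clear-text fallback path (the key and value are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    public class PasswordExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // No provider is configured here, so getPassword conditionally falls
        // back to the clear-text config value, per the javadoc above.
        conf.set("example.db.password", "secret");
        char[] pw = conf.getPassword("example.db.password");
        System.out.println(pw == null ? "not found" : new String(pw));
      }
    }
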
+    <method name="getPasswordFromCredentialProviders" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Try to resolve the provided element name as a credential provider
+ alias.
+ @param name alias of the provisioned credential
+ @return password or null if not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getPasswordFromConfig" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Fallback to clear text passwords in configuration.
+ @param name
+ @return clear text password or null]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>hostProperty</code> as a
+ <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+ <code>null</code>, <code>addressProperty</code> will be used. This
+ is useful for cases where we want to differentiate between the host
+ bind address and the address clients should use to establish a connection.
+
+ @param hostProperty bind host property name.
+ @param addressProperty address property name.
+ @param defaultAddressValue the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultAddress" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>name</code> property as a
+ <code>InetSocketAddress</code>.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="setSocketAddr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address for the <code>name</code> property as
+ a <code>host:port</code>.]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address. If the host and address
+ properties are configured, the host component of the address will be combined
+ with the port component of the addr to generate the address.  This allows
+ optional control over which host name is used in multi-homed bind-host
+ cases where a host can have multiple names.
+ @param hostProperty the bind-host configuration name
+ @param addressProperty the service address configuration name
+ @param defaultAddressValue the service default address configuration value
+ @param addr InetSocketAddress of the service listener
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address.
+ @param name property name.
+ @param addr InetSocketAddress of a listener to store in the given property
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="getClassByName" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Load a class by name.
+ 
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+      </doc>
+    </method>
+    <method name="getClassByNameOrNull" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Load a class by name, returning null rather than throwing an exception
+ if it couldn't be loaded. This is to avoid the overhead of creating
+ an exception.
+ 
+ @param name the class name
+ @return the class object, or null if it could not be found.]]>
+      </doc>
+    </method>
+    <method name="getClasses" return="java.lang.Class[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class[]"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+   
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ An exception is thrown if the returned class does not implement the named
+ interface. 
+ 
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getInstances" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>List</code>
+ of objects implementing the interface specified by <code>xface</code>.
+ 
+ An exception is thrown if any of the classes does not exist, or if it does
+ not implement the named interface.
+ 
+ @param name the property name.
+ @param xface the interface implemented by the classes named by
+        <code>name</code>.
+ @return a <code>List</code> of objects implementing <code>xface</code>.]]>
+      </doc>
+    </method>
+    <method name="setClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the name of a 
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+ 
+ An exception is thrown if <code>theClass</code> does not implement the 
+ interface <code>xface</code>. 
+ 
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+      </doc>
+    </method>
+    <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getResource" return="java.net.URL"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the {@link URL} for the named resource.
+ 
+ @param name resource name.
+ @return the url for the named resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsReader" return="java.io.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getFinalParameters" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the set of parameters marked final.
+
+ @return final parameter set.]]>
+      </doc>
+    </method>
+    <method name="getProps" return="java.util.Properties"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clears all keys from the configuration.]]>
+      </doc>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code> 
+ key-value pairs in the configuration.
+ 
+ @return an iterator over the entries.]]>
+      </doc>
+    </method>
+    <method name="getPropsWithPrefix" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="confPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Constructs a mapping of all configuration properties that
+ start with the specified configuration prefix.  Property names in the
+ mapping are trimmed to remove the configuration prefix.
+
+ @param confPrefix configuration prefix
+ @return mapping of configuration properties with prefix stripped]]>
+      </doc>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream} using UTF-8 encoding.
+ 
+ @param out the output stream to write to.]]>
+      </doc>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the
+ given {@link Writer}.
+
+ <li>
+ When property name is not empty and the property exists in the
+ configuration, this method writes the property and its attributes
+ to the {@link Writer}.
+ </li>
+ <p>
+
+ <li>
+ When property name is null or empty, this method writes all the
+ configuration properties and their attributes to the {@link Writer}.
+ </li>
+ <p>
+
+ <li>
+ When property name is not empty but the property doesn't exist in
+ the configuration, this method throws an {@link IllegalArgumentException}.
+ </li>
+ <p>
+ @param out the writer to write to.]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes properties and their attributes (final and resource)
+  to the given {@link Writer}.
+
+  <li>
+  When propertyName is not empty, and the property exists
+  in the configuration, the format of the output would be,
+  <pre>
+  {
+    "property": {
+      "key" : "key1",
+      "value" : "value1",
+      "isFinal" : "key1.isFinal",
+      "resource" : "key1.resource"
+    }
+  }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is null or empty, it behaves the same as
+  {@link #dumpConfiguration(Configuration, Writer)}, the
+  output would be,
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "ke2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is not empty, and the property is not
+  found in the configuration, this method will throw an
+  {@link IllegalArgumentException}.
+  </li>
+  <p>
+ @param config the configuration
+ @param propertyName property name
+ @param out the Writer to write to
+ @throws IOException
+ @throws IllegalArgumentException when property name is not
+   empty and the property is not found in configuration]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes out all properties and their attributes (final and resource) to
+  the given {@link Writer}, the format of the output would be,
+
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "ke2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+
+  It does not output the properties of the configuration object which
+  is loaded from an input stream.
+  <p>
+
+ @param config the configuration
+ @param out the Writer to write to
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClassLoader" return="java.lang.ClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+      </doc>
+    </method>
+    <method name="setClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="classLoader" type="java.lang.ClassLoader"/>
+      <doc>
+      <![CDATA[Set the class loader that will be used to load the various objects.
+ 
+ @param classLoader the new class loader.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setQuietMode"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="quietmode" type="boolean"/>
+      <doc>
+      <![CDATA[Set the quietness-mode. 
+ 
+ In the quiet-mode, error and informational messages might not be logged.
+ 
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+              to turn it off.]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[For debugging.  List non-default properties to the terminal and exit.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getValByRegex" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="regex" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get keys matching the regex.
+ @param regex
+ @return Map<String,String> with matching keys]]>
+      </doc>
+    </method>
+    <method name="dumpDeprecatedKeys"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasWarnedDeprecation" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns whether a deprecation warning has been issued for the given name.
+ If the name is not deprecated, this always returns false.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a 
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>, 
+ then the classpath is examined for a file with that name.  If named by a 
+ <code>Path</code>, then the local filesystem is examined directly, without 
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two 
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt>
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a></tt>: Read-only defaults for hadoop.</li>
+ <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+ 
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>. 
+ Once a resource declares a value final, no subsequently-loaded 
+ resource can alter that value.  
+ For example, one might define a final parameter with:
+ <tt><pre>
+  &lt;property&gt;
+    &lt;name&gt;dfs.hosts.include&lt;/name&gt;
+    &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt;
+    <b>&lt;final&gt;true&lt;/final&gt;</b>
+  &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in 
+ <tt>core-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions: 
+ <tt><pre>
+  &lt;property&gt;
+    &lt;name&gt;basedir&lt;/name&gt;
+    &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+  &lt;/property&gt;
+  
+  &lt;property&gt;
+    &lt;name&gt;tempdir&lt;/name&gt;
+    &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+  &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.
+ <p>When <tt>conf.get("otherdir")</tt> is called, then <tt>${<i>env.BASE_DIR</i>}</tt>
+ will be resolved to the value of the <tt>${<i>BASE_DIR</i>}</tt> environment variable.
+ It supports <tt>${<i>env.NAME:-default</i>}</tt> and <tt>${<i>env.NAME-default</i>}</tt> notations.
+ The former is resolved to "default" if <tt>${<i>NAME</i>}</tt> environment variable is undefined
+ or its value is empty.
+ The latter behaves the same way only if <tt>${<i>NAME</i>}</tt> is undefined.
+ <p>By default, warnings will be given to any deprecated configuration 
+ parameters and these are suppressible by configuring
+ <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in
+ log4j.properties file.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configuration -->
+  <!-- start class org.apache.hadoop.conf.Configured -->
+  <class name="Configured" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="Configured"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configured -->
+  <!-- start class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
+  <class name="ReconfigurationTaskStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReconfigurationTaskStatus" type="long, long, java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hasTask" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if
+   - A reconfiguration task has finished or
+   - an active reconfiguration task is running]]>
+      </doc>
+    </method>
+    <method name="stopped" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if the latest reconfiguration task has finished and there is
+ no other active task is running.]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEndTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStatus" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
+  <doc>
+  <![CDATA[Configuration of system parameters.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.crypto">
+</package>
+<package name="org.apache.hadoop.crypto.key">
+  <!-- start class org.apache.hadoop.crypto.key.KeyProvider -->
+  <class name="KeyProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KeyProvider" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ 
+ @param conf configuration for the provider]]>
+      </doc>
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the provider configuration.
+ 
+ @return the provider configuration]]>
+      </doc>
+    </method>
+    <method name="options" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[A helper function to create an options object.
+ @param conf the configuration to use
+ @return a new options object]]>
+      </doc>
+    </method>
+    <method name="isTransient" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates whether this provider represents a store
+ that is intended for transient use - such as the UserProvider
+ is. These providers are generally used to provide access to
+ keying material rather than for long term storage.
+ @return true if transient, false otherwise]]>
+      </doc>
+    </method>
+    <method name="getKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="versionName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for a specific version of the key. This method is used
+ when decrypting data.
+ @param versionName the name of a specific version of the key
+ @return the key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeys" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key names for all keys.
+ @return the list of key names
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeysMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get key metadata in bulk.
+ @param names the names of the keys to get
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeyVersions" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for all versions of a specific key name.
+ @return the list of key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current version of the key, which should be used for encrypting new
+ data.
+ @param name the base name of the key
+ @return the version name of the current version of the key or null if the
+    key version doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get metadata about the key.
+ @param name the basename of the key
+ @return the key's metadata or null if the key doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="material" type="byte[]"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new key. The given key must not already exist.
+ @param name the base name of the key
+ @param material the key material for the first version of the key.
+ @param options the options for the new key.
+ @return the version name of the first version of the key.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="generateKey" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="size" type="int"/>
+      <param name="algorithm" type="java.lang.String"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <doc>
+      <![CDATA[Generates key material.
+
+ @param size length of the key.
+ @param algorithm algorithm to use for generating the key.
+ @return the generated key.
+ @throws NoSuchAlgorithmException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <exception name="IOException" type="java.io

<TRUNCATED>
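
As a companion to the Configuration APIs excerpted above, here is a minimal
usage sketch. The property names, host, and port are hypothetical, chosen
only for illustration:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class ConfExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Variable expansion: values may reference other properties and,
        // failing that, System properties such as user.name.
        conf.set("basedir", "/user/${user.name}");
        conf.set("tempdir", "${basedir}/tmp");
        System.out.println(conf.get("tempdir"));  // e.g. /user/alice/tmp

        // getPassword() consults any configured credential providers first
        // and only then falls back to a clear-text value in the config.
        char[] secret = conf.getPassword("my.app.db.password");

        // Socket-address helpers documented in the excerpt above.
        conf.setSocketAddr("my.service.address",
            new InetSocketAddress("host.example.com", 9000));
        InetSocketAddress addr =
            conf.getSocketAddr("my.service.address", "0.0.0.0:9000", 9000);
      }
    }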

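The KeyProvider excerpt above covers the key-management surface used by
Hadoop at-rest encryption. A rough sketch of the call pattern, assuming a
provider is configured via hadoop.security.key.provider.path (the key name
below is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KeyProviderExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Use the first provider resolved from the configuration.
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);

        // Create a key, letting the provider generate the material.
        KeyProvider.KeyVersion created =
            provider.createKey("demo.key", KeyProvider.options(conf));

        // Encrypt with the current version; look versions up by name.
        KeyProvider.KeyVersion current = provider.getCurrentKey("demo.key");
        KeyProvider.KeyVersion byName =
            provider.getKeyVersion(current.getVersionName());
        provider.flush();
      }
    }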


[18/38] hadoop git commit: YARN-7674. Update Timeline Reader web app address in UI2. Contributed by Sunil G.

Posted by ae...@apache.org.
YARN-7674. Update Timeline Reader web app address in UI2. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13ad7479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13ad7479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13ad7479

Branch: refs/heads/HDFS-7240
Commit: 13ad7479b0e35a2c2d398e28c676871d9e672dc3
Parents: a78db99
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed Dec 20 22:26:49 2017 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed Dec 20 22:28:28 2017 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13ad7479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 55f6e1b..9d63de3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -24,7 +24,7 @@ function getTimeLineURL(rmhost) {
   var url = window.location.protocol + '//' +
     (ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + rmhost;
 
-  url += '/conf?name=yarn.timeline-service.webapp.address';
+  url += '/conf?name=yarn.timeline-service.reader.webapp.address';
   Ember.Logger.log("Get Timeline Address URL: " + url);
   return url;
 }
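
Context for the one-line change above: UI2 discovers the timeline service by
querying the RM's /conf endpoint, and with ATSv2 the address it needs is the
timeline reader's, i.e. yarn.timeline-service.reader.webapp.address. As a
sketch, a deployment would point that property at the reader process; the
host and port below are placeholders, not defaults:

    <property>
      <name>yarn.timeline-service.reader.webapp.address</name>
      <value>timeline-reader.example.com:8198</value>
    </property>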




[02/38] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
new file mode 100644
index 0000000..f3191e4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
@@ -0,0 +1,829 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:32:45 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Server Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-n
 et/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/c
 ommons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-anno
 tations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoo
 p-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/build/source/ha
 doop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Server Common 2.8.3 -->
+<package name="org.apache.hadoop.yarn.server">
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records">
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <class name="NodeHealthStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is the node healthy?
+ @return <code>true</code> if the node is healthy, else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code NodeHealthStatus} is a summary of the health status of the node.
+ <p>
+ It includes information such as:
+ <ul>
+   <li>
+     An indicator of whether the node is healthy, as determined by the
+     health-check script.
+   </li>
+   <li>The previous time at which the health status was reported.</li>
+   <li>A diagnostic report on the health status.</li>
+ </ul>
+ 
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.security.http">
+</package>
+<package name="org.apache.hadoop.yarn.server.sharedcache">
+</package>
+<package name="org.apache.hadoop.yarn.server.utils">
+  <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <class name="LeveldbIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Iterator"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB, org.iq80.leveldb.ReadOptions"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DBIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator using the specified underlying DBIterator]]>
+      </doc>
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so the key of the next BlockElement
+ returned is greater than or equal to the specified targetKey.]]>
+      </doc>
+    </method>
+    <method name="seekToFirst"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the beginning of the Database.]]>
+      </doc>
+    </method>
+    <method name="seekToLast"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the end of the Database.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns <tt>true</tt> if the iteration has more elements.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekNext" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration, without advancing the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="hasPrev" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return true if there is a previous entry in the iteration.]]>
+      </doc>
+    </method>
+    <method name="prev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration and rewinds the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekPrev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration, without rewinding the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Removes from the database the last element returned by the iterator.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes the iterator.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for a DBIterator to translate the raw RuntimeExceptions that
+ can be thrown into DBExceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp">
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp.dao">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <class name="AppAttemptInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppAttemptInfo" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptState" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="amContainerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <class name="AppAttemptsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
+    </method>
+    <method name="getAttempts" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="attempt" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <class name="AppInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppInfo" type="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppState" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRunningContainers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedCpuVcores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMemoryMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinalAppStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getSubmittedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationTags" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isUnmanagedApp" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="currentAppAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="user" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="name" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="queue" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="type" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appState" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="runningContainers" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="float"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="submittedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="applicationTags" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="unmanagedApplication" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <class name="AppsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appinfo" type="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"/>
+    </method>
+    <method name="getApps" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="app" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <class name="ContainerInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerInfo" type="org.apache.hadoop.yarn.api.records.ContainerReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedVCores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAssignedNodeId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerExitStatus" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerState" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeHttpAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="containerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedMB" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedVCores" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="assignedNodeId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="logUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerExitStatus" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerState" type="org.apache.hadoop.yarn.api.records.ContainerState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeHttpAddress" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+  <class name="ContainersInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainersInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerInfo" type="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"/>
+    </method>
+    <method name="getContainers" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="container" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+</package>
+
+</api>
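For context on the LeveldbIterator entry above: the excerpt shows only remove() and close(), plus the class doc stating that the wrapper translates leveldb's raw RuntimeExceptions into DBExceptions. A minimal usage sketch follows; the LeveldbIterator(DB) constructor and the seekToFirst()/hasNext()/next() cursor methods are assumed from the wrapper's usual shape in the YARN codebase and are not part of this excerpt.

    import java.io.IOException;

    import org.iq80.leveldb.DB;
    import org.iq80.leveldb.DBException;

    import org.apache.hadoop.yarn.server.utils.LeveldbIterator;

    public class LeveldbScanExample {
      /** Counts all entries in a leveldb database via LeveldbIterator. */
      public static int countEntries(DB db) throws IOException {
        // Constructor and cursor methods are assumed; the XML excerpt
        // above documents only remove() and close().
        LeveldbIterator iter = new LeveldbIterator(db);
        try {
          int count = 0;
          iter.seekToFirst();
          while (iter.hasNext()) {
            iter.next();
            count++;
          }
          return count;
        } catch (DBException e) {
          // Per the class doc, the wrapper surfaces leveldb's raw
          // RuntimeExceptions as DBException, so one catch clause
          // covers the whole scan.
          throw new IOException("leveldb scan failed", e);
        } finally {
          iter.close();
        }
      }
    }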


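The webapp DAO classes listed above are thin, serialization-friendly wrappers over the records API. A sketch of the intended construction pattern, using only members shown in the XML (the AppInfo(ApplicationReport) constructor and AppsInfo.add(AppInfo)); the source of the reports list is a hypothetical input, e.g. a YarnClient#getApplications() call:

    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
    import org.apache.hadoop.yarn.server.webapp.dao.AppsInfo;

    public class AppsInfoBuilder {
      /**
       * Wraps ApplicationReport records into the AppsInfo DAO, the
       * shape the web layer exposes. Each report is converted by the
       * AppInfo(ApplicationReport) constructor documented above.
       */
      public static AppsInfo build(List<ApplicationReport> reports) {
        AppsInfo apps = new AppsInfo();
        for (ApplicationReport report : reports) {
          apps.add(new AppInfo(report));
        }
        return apps;
      }
    }

AppAttemptsInfo and ContainersInfo follow the same add()/get() pattern for ApplicationAttemptReport and ContainerReport respectively, per their constructors and add(...) signatures in the listing above.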